
; Original program: x / (1 + sqrt(x + 1)), evaluated in binary64.
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
real(8) function code(x)
    ! x / (1 + sqrt(x + 1)) in double precision
    real(8), intent (in) :: x
    real(8) :: s
    s = sqrt(x + 1.0d0)
    code = x / (1.0d0 + s)
end function
/** Computes x / (1 + sqrt(x + 1)) in binary64. */
public static double code(double x) {
    double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    """Return x / (1 + sqrt(x + 1)) in binary64."""
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
function code(x)
    # x / (1 + sqrt(x + 1)), each intermediate rounded to Float64
    s = sqrt(Float64(x + 1.0))
    return Float64(x / Float64(1.0 + s))
end
function tmp = code(x)
    % x / (1 + sqrt(x + 1))
    s = sqrt(x + 1.0);
    tmp = x / (1.0 + s);
end
(* x / (1 + Sqrt[x + 1]) with machine-precision rounding at each step *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Original program: x / (1 + sqrt(x + 1)), evaluated in binary64.
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
real(8) function code(x)
    ! x / (1 + sqrt(x + 1)) in double precision
    real(8), intent (in) :: x
    real(8) :: s
    s = sqrt(x + 1.0d0)
    code = x / (1.0d0 + s)
end function
/** Computes x / (1 + sqrt(x + 1)) in binary64. */
public static double code(double x) {
    double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    """Return x / (1 + sqrt(x + 1)) in binary64."""
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
function code(x)
    # x / (1 + sqrt(x + 1)), each intermediate rounded to Float64
    s = sqrt(Float64(x + 1.0))
    return Float64(x / Float64(1.0 + s))
end
function tmp = code(x)
    % x / (1 + sqrt(x + 1))
    s = sqrt(x + 1.0);
    tmp = x / (1.0 + s);
end
(* x / (1 + Sqrt[x + 1]) with machine-precision rounding at each step *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
; Original program: x / (1 + sqrt(x + 1)), evaluated in binary64.
(FPCore (x) :precision binary64 (/ x (+ 1.0 (sqrt (+ x 1.0)))))
double code(double x) {
return x / (1.0 + sqrt((x + 1.0)));
}
real(8) function code(x)
    ! x / (1 + sqrt(x + 1)) in double precision
    real(8), intent (in) :: x
    real(8) :: s
    s = sqrt(x + 1.0d0)
    code = x / (1.0d0 + s)
end function
/** Computes x / (1 + sqrt(x + 1)) in binary64. */
public static double code(double x) {
    double denom = 1.0 + Math.sqrt(x + 1.0);
    return x / denom;
}
def code(x):
    """Return x / (1 + sqrt(x + 1)) in binary64."""
    denom = 1.0 + math.sqrt(x + 1.0)
    return x / denom
function code(x)
    # x / (1 + sqrt(x + 1)), each intermediate rounded to Float64
    s = sqrt(Float64(x + 1.0))
    return Float64(x / Float64(1.0 + s))
end
function tmp = code(x)
    % x / (1 + sqrt(x + 1))
    s = sqrt(x + 1.0);
    tmp = x / (1.0 + s);
end
(* x / (1 + Sqrt[x + 1]) with machine-precision rounding at each step *)
code[x_] := N[(x / N[(1.0 + N[Sqrt[N[(x + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + \sqrt{x + 1}}
\end{array}
Initial program 99.8%
; Piecewise Herbie rewrite of x/(1+sqrt(x+1)):
; rational form built from t_0, t_1 for x <= 3, sqrt(x) - 1 for large x.
(FPCore (x)
:precision binary64
(let* ((t_0 (+ -0.125 (* x 0.0625))) (t_1 (* x t_0)))
(if (<= x 3.0)
(/
x
(+
(/ x 2.0)
(/ 1.0 (/ (+ (* t_0 (* x x)) -2.0) (+ (* (* x x) (* t_1 t_1)) -4.0)))))
(+ (sqrt x) -1.0))))
double code(double x) {
double t_0 = -0.125 + (x * 0.0625);
double t_1 = x * t_0;
double tmp;
if (x <= 3.0) {
tmp = x / ((x / 2.0) + (1.0 / (((t_0 * (x * x)) + -2.0) / (((x * x) * (t_1 * t_1)) + -4.0))));
} else {
tmp = sqrt(x) + -1.0;
}
return tmp;
}
real(8) function code(x)
    ! Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3,
    ! sqrt(x) - 1 for large x
    real(8), intent (in) :: x
    real(8) :: a
    real(8) :: b
    a = (-0.125d0) + (x * 0.0625d0)
    b = x * a
    if (x <= 3.0d0) then
        code = x / ((x / 2.0d0) + (1.0d0 / (((a * (x * x)) + (-2.0d0)) / (((x * x) * (b * b)) + (-4.0d0)))))
    else
        code = sqrt(x) + (-1.0d0)
    end if
end function
/**
 * Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3,
 * sqrt(x) - 1 for large x. Arithmetic order preserved from the rewrite.
 */
public static double code(double x) {
    double a = -0.125 + (x * 0.0625);
    double b = x * a;
    if (x <= 3.0) {
        return x / ((x / 2.0) + (1.0 / (((a * (x * x)) + -2.0) / (((x * x) * (b * b)) + -4.0))));
    }
    return Math.sqrt(x) + -1.0;
}
def code(x):
    """Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3,
    sqrt(x) - 1 for large x.

    NOTE(review): the original listing had every statement fused onto one
    line, which is not valid Python; reformatted with the arithmetic unchanged.
    """
    t_0 = -0.125 + (x * 0.0625)
    t_1 = x * t_0
    if x <= 3.0:
        return x / ((x / 2.0) + (1.0 / (((t_0 * (x * x)) + -2.0) / (((x * x) * (t_1 * t_1)) + -4.0))))
    return math.sqrt(x) + -1.0
function code(x)
    # Herbie rewrite of x/(1+sqrt(x+1)); NOTE(review): the original listing
    # juxtaposed statements on one line (invalid Julia) — reformatted,
    # arithmetic unchanged.
    t_0 = Float64(-0.125 + Float64(x * 0.0625))
    t_1 = Float64(x * t_0)
    if x <= 3.0
        tmp = Float64(x / Float64(Float64(x / 2.0) + Float64(1.0 / Float64(Float64(Float64(t_0 * Float64(x * x)) + -2.0) / Float64(Float64(Float64(x * x) * Float64(t_1 * t_1)) + -4.0)))))
    else
        tmp = Float64(sqrt(x) + -1.0)
    end
    return tmp
end
function tmp_2 = code(x)
    % Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3,
    % sqrt(x) - 1 for large x
    t_0 = -0.125 + (x * 0.0625);
    t_1 = x * t_0;
    if (x <= 3.0)
        tmp = x / ((x / 2.0) + (1.0 / (((t_0 * (x * x)) + -2.0) / (((x * x) * (t_1 * t_1)) + -4.0))));
    else
        tmp = sqrt(x) + -1.0;
    end
    tmp_2 = tmp;
end
(* Piecewise Herbie rewrite: rational form for x <= 3, Sqrt[x] - 1 otherwise *)
code[x_] := Block[{t$95$0 = N[(-0.125 + N[(x * 0.0625), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * t$95$0), $MachinePrecision]}, If[LessEqual[x, 3.0], N[(x / N[(N[(x / 2.0), $MachinePrecision] + N[(1.0 / N[(N[(N[(t$95$0 * N[(x * x), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] / N[(N[(N[(x * x), $MachinePrecision] * N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision] + -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Sqrt[x], $MachinePrecision] + -1.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -0.125 + x \cdot 0.0625\\
t_1 := x \cdot t\_0\\
\mathbf{if}\;x \leq 3:\\
\;\;\;\;\frac{x}{\frac{x}{2} + \frac{1}{\frac{t\_0 \cdot \left(x \cdot x\right) + -2}{\left(x \cdot x\right) \cdot \left(t\_1 \cdot t\_1\right) + -4}}}\\
\mathbf{else}:\\
\;\;\;\;\sqrt{x} + -1\\
\end{array}
\end{array}
if x < 3: Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.2%
Simplified99.2%
distribute-rgt-inN/A
associate-+l+N/A
+-lowering-+.f64N/A
*-commutativeN/A
metadata-evalN/A
div-invN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.2%
Applied egg-rr99.2%
flip-+N/A
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
sub-negN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
metadata-evalN/A
Applied egg-rr99.2%
if 3 < x: Initial program 99.3%
Taylor expanded in x around inf
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
sqrt-lowering-sqrt.f6497.8%
Simplified97.8%
; Piecewise Herbie rewrite of x/(1+sqrt(x+1)):
; rational form built from t_0, t_1 for x <= 3.7, sqrt(x) for large x.
(FPCore (x)
:precision binary64
(let* ((t_0 (+ -0.125 (* x 0.0625))) (t_1 (* x t_0)))
(if (<= x 3.7)
(/
x
(+
(/ x 2.0)
(/ 1.0 (/ (+ (* t_0 (* x x)) -2.0) (+ (* (* x x) (* t_1 t_1)) -4.0)))))
(sqrt x))))
double code(double x) {
double t_0 = -0.125 + (x * 0.0625);
double t_1 = x * t_0;
double tmp;
if (x <= 3.7) {
tmp = x / ((x / 2.0) + (1.0 / (((t_0 * (x * x)) + -2.0) / (((x * x) * (t_1 * t_1)) + -4.0))));
} else {
tmp = sqrt(x);
}
return tmp;
}
real(8) function code(x)
    ! Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3.7,
    ! sqrt(x) for large x
    real(8), intent (in) :: x
    real(8) :: a
    real(8) :: b
    a = (-0.125d0) + (x * 0.0625d0)
    b = x * a
    if (x <= 3.7d0) then
        code = x / ((x / 2.0d0) + (1.0d0 / (((a * (x * x)) + (-2.0d0)) / (((x * x) * (b * b)) + (-4.0d0)))))
    else
        code = sqrt(x)
    end if
end function
/**
 * Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3.7,
 * sqrt(x) for large x. Arithmetic order preserved from the rewrite.
 */
public static double code(double x) {
    double a = -0.125 + (x * 0.0625);
    double b = x * a;
    if (x <= 3.7) {
        return x / ((x / 2.0) + (1.0 / (((a * (x * x)) + -2.0) / (((x * x) * (b * b)) + -4.0))));
    }
    return Math.sqrt(x);
}
def code(x):
    """Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3.7,
    sqrt(x) for large x.

    NOTE(review): the original listing had every statement fused onto one
    line, which is not valid Python; reformatted with the arithmetic unchanged.
    """
    t_0 = -0.125 + (x * 0.0625)
    t_1 = x * t_0
    if x <= 3.7:
        return x / ((x / 2.0) + (1.0 / (((t_0 * (x * x)) + -2.0) / (((x * x) * (t_1 * t_1)) + -4.0))))
    return math.sqrt(x)
function code(x)
    # Herbie rewrite of x/(1+sqrt(x+1)); NOTE(review): the original listing
    # juxtaposed statements on one line (invalid Julia) — reformatted,
    # arithmetic unchanged.
    t_0 = Float64(-0.125 + Float64(x * 0.0625))
    t_1 = Float64(x * t_0)
    if x <= 3.7
        tmp = Float64(x / Float64(Float64(x / 2.0) + Float64(1.0 / Float64(Float64(Float64(t_0 * Float64(x * x)) + -2.0) / Float64(Float64(Float64(x * x) * Float64(t_1 * t_1)) + -4.0)))))
    else
        tmp = sqrt(x)
    end
    return tmp
end
function tmp_2 = code(x)
    % Herbie rewrite of x/(1+sqrt(x+1)): rational form for x <= 3.7,
    % sqrt(x) for large x
    t_0 = -0.125 + (x * 0.0625);
    t_1 = x * t_0;
    if (x <= 3.7)
        tmp = x / ((x / 2.0) + (1.0 / (((t_0 * (x * x)) + -2.0) / (((x * x) * (t_1 * t_1)) + -4.0))));
    else
        tmp = sqrt(x);
    end
    tmp_2 = tmp;
end
(* Piecewise Herbie rewrite: rational form for x <= 3.7, Sqrt[x] otherwise *)
code[x_] := Block[{t$95$0 = N[(-0.125 + N[(x * 0.0625), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * t$95$0), $MachinePrecision]}, If[LessEqual[x, 3.7], N[(x / N[(N[(x / 2.0), $MachinePrecision] + N[(1.0 / N[(N[(N[(t$95$0 * N[(x * x), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] / N[(N[(N[(x * x), $MachinePrecision] * N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision] + -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Sqrt[x], $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -0.125 + x \cdot 0.0625\\
t_1 := x \cdot t\_0\\
\mathbf{if}\;x \leq 3.7:\\
\;\;\;\;\frac{x}{\frac{x}{2} + \frac{1}{\frac{t\_0 \cdot \left(x \cdot x\right) + -2}{\left(x \cdot x\right) \cdot \left(t\_1 \cdot t\_1\right) + -4}}}\\
\mathbf{else}:\\
\;\;\;\;\sqrt{x}\\
\end{array}
\end{array}
if x < 3.7000000000000002: Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.2%
Simplified99.2%
distribute-rgt-inN/A
associate-+l+N/A
+-lowering-+.f64N/A
*-commutativeN/A
metadata-evalN/A
div-invN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.2%
Applied egg-rr99.2%
flip-+N/A
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
sub-negN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
metadata-evalN/A
Applied egg-rr99.2%
if 3.7000000000000002 < x: Initial program 99.3%
Taylor expanded in x around inf
sqrt-lowering-sqrt.f6496.0%
Simplified96.0%
; Alternative: x * (1 / (2 + x/2)), from a Taylor expansion around 0.
(FPCore (x) :precision binary64 (* x (/ 1.0 (+ 2.0 (/ x 2.0)))))
/* x * (1 / (2 + x/2)) — series form of x/(1+sqrt(x+1)) near 0. */
double code(double x) {
    double inner = 2.0 + (x / 2.0);
    return x * (1.0 / inner);
}
real(8) function code(x)
    ! x * (1 / (2 + x/2)) — series form near 0
    real(8), intent (in) :: x
    real(8) :: inner
    inner = 2.0d0 + (x / 2.0d0)
    code = x * (1.0d0 / inner)
end function
/** x * (1 / (2 + x/2)) — series form of x/(1+sqrt(x+1)) near 0. */
public static double code(double x) {
    double inner = 2.0 + (x / 2.0);
    return x * (1.0 / inner);
}
def code(x):
    """Return x * (1 / (2 + x/2)) — series form near 0."""
    inner = 2.0 + (x / 2.0)
    return x * (1.0 / inner)
function code(x)
    # x * (1 / (2 + x/2)) with Float64 rounding at each step
    inner = Float64(2.0 + Float64(x / 2.0))
    return Float64(x * Float64(1.0 / inner))
end
function tmp = code(x)
    % x * (1 / (2 + x/2)) — series form near 0
    inner = 2.0 + (x / 2.0);
    tmp = x * (1.0 / inner);
end
(* x * (1 / (2 + x/2)) at machine precision *)
code[x_] := N[(x * N[(1.0 / N[(2.0 + N[(x / 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{1}{2 + \frac{x}{2}}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f6471.5%
Simplified71.5%
clear-numN/A
associate-/r/N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
metadata-evalN/A
div-invN/A
/-lowering-/.f6471.5%
Applied egg-rr71.5%
Final simplification71.5%
; Alternative: x / (2 + 0.5 x), simplified series form.
(FPCore (x) :precision binary64 (/ x (+ 2.0 (* x 0.5))))
/* x / (2 + 0.5 x) — simplified series form of x/(1+sqrt(x+1)). */
double code(double x) {
    double inner = 2.0 + (x * 0.5);
    return x / inner;
}
real(8) function code(x)
    ! x / (2 + 0.5 x) — simplified series form
    real(8), intent (in) :: x
    real(8) :: inner
    inner = 2.0d0 + (x * 0.5d0)
    code = x / inner
end function
/** x / (2 + 0.5 x) — simplified series form of x/(1+sqrt(x+1)). */
public static double code(double x) {
    double inner = 2.0 + (x * 0.5);
    return x / inner;
}
def code(x):
    """Return x / (2 + 0.5 * x) — simplified series form."""
    inner = 2.0 + (x * 0.5)
    return x / inner
function code(x)
    # x / (2 + 0.5 x) with Float64 rounding at each step
    inner = Float64(2.0 + Float64(x * 0.5))
    return Float64(x / inner)
end
function tmp = code(x)
    % x / (2 + 0.5 x) — simplified series form
    inner = 2.0 + (x * 0.5);
    tmp = x / inner;
end
(* x / (2 + x * 0.5) at machine precision *)
code[x_] := N[(x / N[(2.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{2 + x \cdot 0.5}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f6471.5%
Simplified71.5%
Final simplification71.5%
; Alternative: 1 / (0.5 + 2/x), reciprocal form.
(FPCore (x) :precision binary64 (/ 1.0 (+ 0.5 (/ 2.0 x))))
/* 1 / (0.5 + 2/x) — reciprocal form of the series approximation. */
double code(double x) {
    double inner = 0.5 + (2.0 / x);
    return 1.0 / inner;
}
real(8) function code(x)
    ! 1 / (0.5 + 2/x) — reciprocal form
    real(8), intent (in) :: x
    real(8) :: inner
    inner = 0.5d0 + (2.0d0 / x)
    code = 1.0d0 / inner
end function
/** 1 / (0.5 + 2/x) — reciprocal form of the series approximation. */
public static double code(double x) {
    double inner = 0.5 + (2.0 / x);
    return 1.0 / inner;
}
def code(x):
    """Return 1 / (0.5 + 2/x) — reciprocal form (raises on x == 0)."""
    inner = 0.5 + (2.0 / x)
    return 1.0 / inner
function code(x)
    # 1 / (0.5 + 2/x) with Float64 rounding at each step
    inner = Float64(0.5 + Float64(2.0 / x))
    return Float64(1.0 / inner)
end
function tmp = code(x)
    % 1 / (0.5 + 2/x) — reciprocal form
    inner = 0.5 + (2.0 / x);
    tmp = 1.0 / inner;
end
(* 1 / (0.5 + 2/x) at machine precision *)
code[x_] := N[(1.0 / N[(0.5 + N[(2.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{0.5 + \frac{2}{x}}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f6471.5%
Simplified71.5%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
metadata-evalN/A
div-invN/A
/-lowering-/.f6471.4%
Applied egg-rr71.4%
Taylor expanded in x around inf
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f6471.4%
Simplified71.4%
; Alternative: x / 2, crude first-order approximation.
(FPCore (x) :precision binary64 (/ x 2.0))
/* Crude first-order approximation: x / 2. */
double code(double x) {
    const double half = x / 2.0;
    return half;
}
real(8) function code(x)
    ! crude first-order approximation: x / 2
    real(8), intent (in) :: x
    code = x / 2.0d0
end function
/** Crude first-order approximation: x / 2. */
public static double code(double x) {
    final double half = x / 2.0;
    return half;
}
def code(x):
    """Crude first-order approximation: return x / 2."""
    half = x / 2.0
    return half
function code(x)
    # crude first-order approximation: x / 2
    half = Float64(x / 2.0)
    return half
end
function tmp = code(x)
    % crude first-order approximation: x / 2
    tmp = x / 2.0;
end
(* x / 2 at machine precision *)
code[x_] := N[(x / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{2}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
Simplified70.4%
; Degenerate alternative: constant 2 (input ignored).
(FPCore (x) :precision binary64 2.0)
/* Degenerate constant alternative; the input is ignored. */
double code(double x) {
    (void) x;  /* intentionally unused */
    return 2.0;
}
real(8) function code(x)
    ! degenerate constant alternative; x is unused
    real(8), intent (in) :: x
    code = 2.0d0
end function
/** Degenerate constant alternative; the input is ignored. */
public static double code(double x) {
    final double result = 2.0;
    return result;
}
def code(x):
    """Degenerate constant alternative; ``x`` is ignored."""
    return 2.0
function code(x)
    # degenerate constant alternative; x is unused
    return 2.0
end
function tmp = code(x)
    % degenerate constant alternative; x is unused
    tmp = 2.0;
end
(* degenerate constant alternative; the argument is ignored *)
code[x_] := 2.0
\begin{array}{l}
\\
2
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f6471.5%
Simplified71.5%
Taylor expanded in x around inf
Simplified4.8%
herbie shell --seed 2024155
; Original Herbie input, as named in the report.
(FPCore (x)
:name "Numeric.Log:$clog1p from log-domain-0.10.2.1, B"
:precision binary64
(/ x (+ 1.0 (sqrt (+ x 1.0)))))