
(FPCore (x) :precision binary64 (- x (/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* (+ 0.99229 (* x 0.04481)) x)))))
/* Starting-guess rational approximation: x - P(x)/Q(x) with
 * P(x) = 2.30753 + 0.27061*x and Q(x) = 1 + (0.99229 + 0.04481*x)*x. */
double code(double x) {
    double num = 2.30753 + 0.27061 * x;
    double den = 1.0 + (0.99229 + 0.04481 * x) * x;
    return x - num / den;
}
! Rational approximation x - P(x)/Q(x), P(x) = 2.30753 + 0.27061*x,
! Q(x) = 1 + (0.99229 + 0.04481*x)*x; d0 suffixes keep all literals in real(8).
real(8) function code(x)
real(8), intent (in) :: x
code = x - ((2.30753d0 + (x * 0.27061d0)) / (1.0d0 + ((0.99229d0 + (x * 0.04481d0)) * x)))
end function
/** Rational starting guess: x - P(x)/Q(x), with
 *  P(x) = 2.30753 + 0.27061*x and Q(x) = 1 + (0.99229 + 0.04481*x)*x. */
public static double code(double x) {
    double numerator = 2.30753 + 0.27061 * x;
    double denominator = 1.0 + (0.99229 + 0.04481 * x) * x;
    return x - numerator / denominator;
}
def code(x):
    """Rational starting guess: x - P(x)/Q(x) with P(x) = 2.30753 + 0.27061*x
    and Q(x) = 1 + (0.99229 + 0.04481*x)*x."""
    numerator = 2.30753 + 0.27061 * x
    denominator = 1.0 + (0.99229 + 0.04481 * x) * x
    return x - numerator / denominator
# x - P(x)/Q(x); every intermediate is wrapped in Float64(...) to force
# binary64 rounding at each step, matching the FPCore :precision annotation.
function code(x) return Float64(x - Float64(Float64(2.30753 + Float64(x * 0.27061)) / Float64(1.0 + Float64(Float64(0.99229 + Float64(x * 0.04481)) * x)))) end
% Rational starting guess x - P(x)/Q(x) (MATLAB translation of the FPCore above).
function tmp = code(x) tmp = x - ((2.30753 + (x * 0.27061)) / (1.0 + ((0.99229 + (x * 0.04481)) * x))); end
(* x - P(x)/Q(x); each intermediate is rounded with N[..., $MachinePrecision] to mimic binary64 evaluation. *)
code[x_] := N[(x - N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(N[(0.99229 + N[(x * 0.04481), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{2.30753 + x \cdot 0.27061}{1 + \left(0.99229 + x \cdot 0.04481\right) \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- x (/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* (+ 0.99229 (* x 0.04481)) x)))))
/* Alternative 1: the initial program unchanged — x - P(x)/Q(x) with
 * P(x) = 2.30753 + 0.27061*x and Q(x) = 1 + (0.99229 + 0.04481*x)*x. */
double code(double x) {
return x - ((2.30753 + (x * 0.27061)) / (1.0 + ((0.99229 + (x * 0.04481)) * x)));
}
! Alternative 1 (identical to the initial program): x - P(x)/Q(x) in real(8).
real(8) function code(x)
real(8), intent (in) :: x
code = x - ((2.30753d0 + (x * 0.27061d0)) / (1.0d0 + ((0.99229d0 + (x * 0.04481d0)) * x)))
end function
/** Alternative 1 (identical to the initial program): x - P(x)/Q(x). */
public static double code(double x) {
return x - ((2.30753 + (x * 0.27061)) / (1.0 + ((0.99229 + (x * 0.04481)) * x)));
}
def code(x):
    """Alternative 1 (same as the initial program): x - P(x)/Q(x)."""
    p = 2.30753 + 0.27061 * x
    q = 1.0 + (0.99229 + 0.04481 * x) * x
    return x - p / q
# Alternative 1 (identical to the initial program); Float64(...) forces binary64 rounding per step.
function code(x) return Float64(x - Float64(Float64(2.30753 + Float64(x * 0.27061)) / Float64(1.0 + Float64(Float64(0.99229 + Float64(x * 0.04481)) * x)))) end
% Alternative 1 (identical to the initial program): x - P(x)/Q(x).
function tmp = code(x) tmp = x - ((2.30753 + (x * 0.27061)) / (1.0 + ((0.99229 + (x * 0.04481)) * x))); end
(* Alternative 1 (identical to the initial program), rounded per step via N[..., $MachinePrecision]. *)
code[x_] := N[(x - N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(N[(0.99229 + N[(x * 0.04481), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{2.30753 + x \cdot 0.27061}{1 + \left(0.99229 + x \cdot 0.04481\right) \cdot x}
\end{array}
(FPCore (x) :precision binary64 (- x (/ (/ 1.0 (+ 1.0 (* x (fma x 0.04481 0.99229)))) (/ 1.0 (fma x 0.27061 2.30753)))))
double code(double x) {
return x - ((1.0 / (1.0 + (x * fma(x, 0.04481, 0.99229)))) / (1.0 / fma(x, 0.27061, 2.30753)));
}
# Reciprocal-ratio form of x - P(x)/Q(x); fma fuses multiply-adds, Float64(...) pins binary64 rounding.
function code(x) return Float64(x - Float64(Float64(1.0 / Float64(1.0 + Float64(x * fma(x, 0.04481, 0.99229)))) / Float64(1.0 / fma(x, 0.27061, 2.30753)))) end
(* Reciprocal-ratio form of x - P(x)/Q(x); fma calls appear as x*a + b inside a single N[..., $MachinePrecision] rounding. *)
code[x_] := N[(x - N[(N[(1.0 / N[(1.0 + N[(x * N[(x * 0.04481 + 0.99229), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 / N[(x * 0.27061 + 2.30753), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{\frac{1}{1 + x \cdot \mathsf{fma}\left(x, 0.04481, 0.99229\right)}}{\frac{1}{\mathsf{fma}\left(x, 0.27061, 2.30753\right)}}
\end{array}
Initial program 100.0%
clear-num N/A
div-inv N/A
associate-/r* N/A
/-lowering-/.f64 N/A
/-lowering-/.f64 N/A
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 100.0
Applied egg-rr 100.0%
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
accelerator-lowering-fma.f64 100.0
Applied egg-rr 100.0%
Final simplification 100.0%
(FPCore (x)
:precision binary64
(let* ((t_0
(+
x
(/
(+ 2.30753 (* x 0.27061))
(- -1.0 (* x (+ 0.99229 (* x 0.04481))))))))
(if (<= t_0 -10000000.0) x (if (<= t_0 -2.0) -2.30753 x))))
/* Piecewise alternative: compute t_0 = x + P(x)/(-Q(x)) and return -2.30753
 * only when t_0 falls in (-1e7, -2]; everywhere else (including NaN, where
 * both comparisons are false) return x unchanged. */
double code(double x) {
    double p = 2.30753 + x * 0.27061;
    double neg_q = -1.0 - x * (0.99229 + x * 0.04481);
    double t_0 = x + p / neg_q;
    if (t_0 > -10000000.0 && t_0 <= -2.0) {
        return -2.30753;
    }
    return x;
}
! Piecewise alternative: t_0 = x + P(x)/(-Q(x)); the result is -2.30753d0
! only when t_0 lies in (-1.0d7, -2.0d0], otherwise x is returned unchanged.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = x + ((2.30753d0 + (x * 0.27061d0)) / ((-1.0d0) - (x * (0.99229d0 + (x * 0.04481d0)))))
! Note: both the first branch and the final else return x; only the middle
! band (-1.0d7, -2.0d0] maps to the constant.
if (t_0 <= (-10000000.0d0)) then
tmp = x
else if (t_0 <= (-2.0d0)) then
tmp = -2.30753d0
else
tmp = x
end if
code = tmp
end function
/** Piecewise alternative: t0 = x + P(x)/(-Q(x)); yields -2.30753 only when
 *  t0 is in (-1e7, -2], otherwise x (NaN t0 also yields x, as both
 *  comparisons are false). */
public static double code(double x) {
    double p = 2.30753 + x * 0.27061;
    double negQ = -1.0 - x * (0.99229 + x * 0.04481);
    double t0 = x + p / negQ;
    boolean inBand = t0 > -10000000.0 && t0 <= -2.0;
    return inBand ? -2.30753 : x;
}
def code(x):
    """Piecewise Herbie alternative.

    Computes t_0 = x + P(x)/(-Q(x)) and returns -2.30753 only when t_0
    lies in (-1e7, -2]; otherwise returns x unchanged.

    Fix: the generated statements had been collapsed onto a single line,
    which is not valid Python; reformatted into proper blocks with the
    original statement order and semantics preserved.
    """
    t_0 = x + ((2.30753 + (x * 0.27061)) / (-1.0 - (x * (0.99229 + (x * 0.04481)))))
    tmp = 0
    if t_0 <= -10000000.0:
        tmp = x
    elif t_0 <= -2.0:
        tmp = -2.30753
    else:
        tmp = x
    return tmp
# Piecewise Herbie alternative: -2.30753 when t_0 = x + P(x)/(-Q(x)) is in
# (-1e7, -2], otherwise x. Fix: the generated statements had been collapsed
# onto one line (`tmp = 0.0 if ...` does not parse); reformatted with the
# original statement order and semantics preserved.
function code(x)
    t_0 = Float64(x + Float64(Float64(2.30753 + Float64(x * 0.27061)) / Float64(-1.0 - Float64(x * Float64(0.99229 + Float64(x * 0.04481))))))
    tmp = 0.0
    if (t_0 <= -10000000.0)
        tmp = x
    elseif (t_0 <= -2.0)
        tmp = -2.30753
    else
        tmp = x
    end
    return tmp
end
% Piecewise Herbie alternative: -2.30753 when t_0 = x + P(x)/(-Q(x)) is in
% (-1e7, -2], otherwise x. Reformatted from a collapsed single line into
% conventional multi-line control flow; statement order and semantics kept.
function tmp_2 = code(x)
    t_0 = x + ((2.30753 + (x * 0.27061)) / (-1.0 - (x * (0.99229 + (x * 0.04481)))));
    tmp = 0.0;
    if (t_0 <= -10000000.0)
        tmp = x;
    elseif (t_0 <= -2.0)
        tmp = -2.30753;
    else
        tmp = x;
    end
    tmp_2 = tmp;
end
(* Piecewise alternative: t$95$0 is Herbie's t_0; nested If returns x, then -2.30753 for t_0 in (-1e7, -2], else x. *)
code[x_] := Block[{t$95$0 = N[(x + N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - N[(x * N[(0.99229 + N[(x * 0.04481), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -10000000.0], x, If[LessEqual[t$95$0, -2.0], -2.30753, x]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x + \frac{2.30753 + x \cdot 0.27061}{-1 - x \cdot \left(0.99229 + x \cdot 0.04481\right)}\\
\mathbf{if}\;t_0 \leq -10000000:\\
\;\;\;\;x\\
\mathbf{elif}\;t_0 \leq -2:\\
\;\;\;\;-2.30753\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if (-.f64 x (/.f64 (+.f64 #s(literal 230753/100000 binary64) (*.f64 x #s(literal 27061/100000 binary64))) (+.f64 #s(literal 1 binary64) (*.f64 (+.f64 #s(literal 99229/100000 binary64) (*.f64 x #s(literal 4481/100000 binary64))) x)))) < -1e7 or -2 < (-.f64 x (/.f64 (+.f64 #s(literal 230753/100000 binary64) (*.f64 x #s(literal 27061/100000 binary64))) (+.f64 #s(literal 1 binary64) (*.f64 (+.f64 #s(literal 99229/100000 binary64) (*.f64 x #s(literal 4481/100000 binary64))) x)))) Initial program 100.0%
Taylor expanded in x around inf
Simplified 99.9%
if -1e7 < (-.f64 x (/.f64 (+.f64 #s(literal 230753/100000 binary64) (*.f64 x #s(literal 27061/100000 binary64))) (+.f64 #s(literal 1 binary64) (*.f64 (+.f64 #s(literal 99229/100000 binary64) (*.f64 x #s(literal 4481/100000 binary64))) x)))) < -2Initial program 100.0%
Taylor expanded in x around 0
Simplified 97.6%
Final simplification 98.7%
(FPCore (x) :precision binary64 (+ x (/ (/ -1.0 (fma x (fma x 0.04481 0.99229) 1.0)) (/ 1.0 (fma x 0.27061 2.30753)))))
double code(double x) {
return x + ((-1.0 / fma(x, fma(x, 0.04481, 0.99229), 1.0)) / (1.0 / fma(x, 0.27061, 2.30753)));
}
# Negated-reciprocal form of x + P(x)/(-Q(x)); nested fma evaluates Q via Horner, Float64(...) pins binary64 rounding.
function code(x) return Float64(x + Float64(Float64(-1.0 / fma(x, fma(x, 0.04481, 0.99229), 1.0)) / Float64(1.0 / fma(x, 0.27061, 2.30753)))) end
(* Negated-reciprocal form; each fma is rendered as x*a + b under one N[..., $MachinePrecision] rounding. *)
code[x_] := N[(x + N[(N[(-1.0 / N[(x * N[(x * 0.04481 + 0.99229), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[(1.0 / N[(x * 0.27061 + 2.30753), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{\frac{-1}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.04481, 0.99229\right), 1\right)}}{\frac{1}{\mathsf{fma}\left(x, 0.27061, 2.30753\right)}}
\end{array}
Initial program 100.0%
clear-num N/A
div-inv N/A
associate-/r* N/A
/-lowering-/.f64 N/A
/-lowering-/.f64 N/A
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 100.0
Applied egg-rr 100.0%
Final simplification 100.0%
(FPCore (x) :precision binary64 (- x (/ (fma x 0.27061 2.30753) (fma x (fma x 0.04481 0.99229) 1.0))))
double code(double x) {
return x - (fma(x, 0.27061, 2.30753) / fma(x, fma(x, 0.04481, 0.99229), 1.0));
}
# Direct x - P(x)/Q(x) with Horner-via-fma polynomials; Float64(...) pins binary64 rounding.
function code(x) return Float64(x - Float64(fma(x, 0.27061, 2.30753) / fma(x, fma(x, 0.04481, 0.99229), 1.0))) end
(* Direct x - P(x)/Q(x); the fma calls appear as x*a + b under a single N[..., $MachinePrecision] rounding. *)
code[x_] := N[(x - N[(N[(x * 0.27061 + 2.30753), $MachinePrecision] / N[(x * N[(x * 0.04481 + 0.99229), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{\mathsf{fma}\left(x, 0.27061, 2.30753\right)}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.04481, 0.99229\right), 1\right)}
\end{array}
Initial program 100.0%
--lowering--.f64 N/A
/-lowering-/.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 N/A
+-commutative N/A
accelerator-lowering-fma.f64 100.0
Applied egg-rr 100.0%
(FPCore (x) :precision binary64 (- x (/ (+ 2.30753 (* x 0.27061)) (fma x 0.99229 1.0))))
double code(double x) {
return x - ((2.30753 + (x * 0.27061)) / fma(x, 0.99229, 1.0));
}
# Truncated-denominator alternative: denominator reduced to fma(x, 0.99229, 1.0); Float64(...) pins binary64 rounding.
function code(x) return Float64(x - Float64(Float64(2.30753 + Float64(x * 0.27061)) / fma(x, 0.99229, 1.0))) end
(* Truncated-denominator alternative; the fma denominator is rendered as x*0.99229 + 1.0 in one rounding. *)
code[x_] := N[(x - N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(x * 0.99229 + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - \frac{2.30753 + x \cdot 0.27061}{\mathsf{fma}\left(x, 0.99229, 1\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative N/A
*-commutative N/A
metadata-eval N/A
lft-mult-inverse N/A
associate-*l* N/A
accelerator-lowering-fma.f64 N/A
associate-*l* N/A
lft-mult-inverse N/A
metadata-eval 99.0
Simplified 99.0%
(FPCore (x) :precision binary64 (- x 2.30753))
/* Linear fallback from the Taylor expansion around 0: the quotient is
 * approximated by the constant 2.30753. */
double code(double x) {
    const double offset = 2.30753;
    return x - offset;
}
! Linear fallback (Taylor expansion around 0): x - 2.30753.
real(8) function code(x)
real(8), intent (in) :: x
code = x - 2.30753d0
end function
/** Linear fallback (Taylor expansion around 0): x - 2.30753. */
public static double code(double x) {
    final double offset = 2.30753;
    return x - offset;
}
def code(x):
    """Linear fallback (Taylor expansion around 0): x - 2.30753."""
    offset = 2.30753
    return x - offset
# Linear fallback (Taylor expansion around 0): x - 2.30753 in binary64.
function code(x) return Float64(x - 2.30753) end
% Linear fallback (Taylor expansion around 0): x - 2.30753.
function tmp = code(x) tmp = x - 2.30753; end
(* Linear fallback (Taylor expansion around 0): x - 2.30753. *)
code[x_] := N[(x - 2.30753), $MachinePrecision]
\begin{array}{l}
\\
x - 2.30753
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified 98.2%
(FPCore (x) :precision binary64 -2.30753)
/* Constant (order-zero) alternative: always -2.30753, regardless of x. */
double code(double x) {
    (void)x; /* input intentionally unused */
    return -2.30753;
}
! Constant (order-zero) alternative: always -2.30753, regardless of x.
real(8) function code(x)
real(8), intent (in) :: x
code = -2.30753d0
end function
/** Constant (order-zero) alternative: always -2.30753, regardless of x. */
public static double code(double x) {
    final double approximation = -2.30753;
    return approximation;
}
def code(x):
    """Constant (order-zero) alternative: always -2.30753, regardless of x."""
    approximation = -2.30753
    return approximation
# Constant (order-zero) alternative: always -2.30753, regardless of x.
function code(x) return -2.30753 end
% Constant (order-zero) alternative: always -2.30753, regardless of x.
function tmp = code(x) tmp = -2.30753; end
(* Constant (order-zero) alternative: always -2.30753, regardless of x. *)
code[x_] := -2.30753
\begin{array}{l}
\\
-2.30753
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified 52.5%
herbie shell --seed 2024196
(FPCore (x)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, D"
:precision binary64
(- x (/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* (+ 0.99229 (* x 0.04481)) x)))))