
; Initial program: rational approximant (2.30753 + 0.27061 x) / (1 + x (0.99229 + 0.04481 x)) minus x, in binary64.
(FPCore (x) :precision binary64 (- (/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* x (+ 0.99229 (* x 0.04481))))) x))
/* Rational approximant (2.30753 + 0.27061*x) / (1 + x*(0.99229 + 0.04481*x)) minus x,
 * in binary64 with exactly the same operation order as the FPCore spec above. */
double code(double x) {
    double numerator = 2.30753 + (x * 0.27061);
    double denominator = 1.0 + (x * (0.99229 + (x * 0.04481)));
    return (numerator / denominator) - x;
}
! Rational approximant (2.30753 + 0.27061*x) / (1 + x*(0.99229 + 0.04481*x)) minus x,
! evaluated in double precision with the same operation order as the FPCore spec above.
real(8) function code(x)
real(8), intent (in) :: x
code = ((2.30753d0 + (x * 0.27061d0)) / (1.0d0 + (x * (0.99229d0 + (x * 0.04481d0))))) - x
end function
// Rational approximant P(x)/Q(x) minus x; evaluation order matches the FPCore spec above.
public static double code(double x) {
    double numerator = 2.30753 + (x * 0.27061);
    double denominator = 1.0 + (x * (0.99229 + (x * 0.04481)));
    return (numerator / denominator) - x;
}
def code(x):
    """Rational approximant (2.30753 + 0.27061*x) / (1 + x*(0.99229 + 0.04481*x)) minus x.

    Operation order is identical to the FPCore spec above, so binary64 rounding matches.
    """
    numerator = 2.30753 + (x * 0.27061)
    denominator = 1.0 + (x * (0.99229 + (x * 0.04481)))
    return (numerator / denominator) - x
# Julia translation: explicit Float64(...) wrappers pin binary64 rounding after every operation.
function code(x) return Float64(Float64(Float64(2.30753 + Float64(x * 0.27061)) / Float64(1.0 + Float64(x * Float64(0.99229 + Float64(x * 0.04481))))) - x) end
% Initial program: rational approximant minus x (MATLAB translation).
% Reformatted onto multiple lines: the flattened single-line form (header and
% body fused) is not valid MATLAB syntax.
function tmp = code(x)
	tmp = ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
end
(* Mathematica translation: N[..., $MachinePrecision] forces machine-precision rounding after each operation. *)
code[x_] := N[(N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(x * N[(0.99229 + N[(x * 0.04481), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\frac{2.30753 + x \cdot 0.27061}{1 + x \cdot \left(0.99229 + x \cdot 0.04481\right)} - x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; First listed alternative: identical to the initial program.
(FPCore (x) :precision binary64 (- (/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* x (+ 0.99229 (* x 0.04481))))) x))
/* Same arithmetic as the initial program: rational approximant minus x (C translation). */
double code(double x) {
return ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
}
! Same arithmetic as the initial program (Fortran translation, double precision).
real(8) function code(x)
real(8), intent (in) :: x
code = ((2.30753d0 + (x * 0.27061d0)) / (1.0d0 + (x * (0.99229d0 + (x * 0.04481d0))))) - x
end function
// Same arithmetic as the initial program (Java translation).
public static double code(double x) {
return ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
}
# Same arithmetic as the initial program (Python translation; same operation order).
def code(x): return ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x
# Same arithmetic as the initial program; Float64(...) wrappers pin binary64 rounding per step.
function code(x) return Float64(Float64(Float64(2.30753 + Float64(x * 0.27061)) / Float64(1.0 + Float64(x * Float64(0.99229 + Float64(x * 0.04481))))) - x) end
% Same arithmetic as the initial program (MATLAB translation).
% Reformatted onto multiple lines: the flattened single-line form is not valid MATLAB syntax.
function tmp = code(x)
	tmp = ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
end
(* Same arithmetic as the initial program; N[..., $MachinePrecision] rounds after each step. *)
code[x_] := N[(N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(x * N[(0.99229 + N[(x * 0.04481), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\frac{2.30753 + x \cdot 0.27061}{1 + x \cdot \left(0.99229 + x \cdot 0.04481\right)} - x
\end{array}
; Alternative: numerator and denominator rewritten as a ratio of reciprocals, with fused multiply-adds.
(FPCore (x) :precision binary64 (- (/ (/ 1.0 (+ 1.0 (* x (fma x 0.04481 0.99229)))) (/ 1.0 (fma x 0.27061 2.30753))) x))
double code(double x) {
return ((1.0 / (1.0 + (x * fma(x, 0.04481, 0.99229)))) / (1.0 / fma(x, 0.27061, 2.30753))) - x;
}
# Reciprocal form (1/Q)/(1/P) - x with fma; Float64(...) wrappers pin binary64 rounding per step.
function code(x) return Float64(Float64(Float64(1.0 / Float64(1.0 + Float64(x * fma(x, 0.04481, 0.99229)))) / Float64(1.0 / fma(x, 0.27061, 2.30753))) - x) end
(* Reciprocal form (1/Q)/(1/P) - x; fma rendered as explicit multiply-add under machine-precision rounding. *)
code[x_] := N[(N[(N[(1.0 / N[(1.0 + N[(x * N[(x * 0.04481 + 0.99229), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 / N[(x * 0.27061 + 2.30753), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{1 + x \cdot \mathsf{fma}\left(x, 0.04481, 0.99229\right)}}{\frac{1}{\mathsf{fma}\left(x, 0.27061, 2.30753\right)}} - x
\end{array}
Initial program 100.0%
clear-numN/A
associate-/r/N/A
flip-+N/A
clear-numN/A
clear-numN/A
flip-+N/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64100.0
Applied egg-rr100.0%
+-lowering-+.f64N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64100.0
Applied egg-rr100.0%
Final simplification100.0%
; Piecewise alternative: compute the residual t_0 of the rational approximant;
; both tails (t_0 <= -1e9, t_0 > 5) return -x, the mid-range returns the constant 2.30753.
(FPCore (x)
:precision binary64
(let* ((t_0
(-
(/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* x (+ 0.99229 (* x 0.04481)))))
x)))
(if (<= t_0 -1000000000.0) (- 0.0 x) (if (<= t_0 5.0) 2.30753 (- 0.0 x)))))
/* Piecewise form: t_0 is the residual of the rational approximant.
 * The mid-range (-1e9 < t_0 <= 5) returns the constant 2.30753; both tails return -x.
 * A NaN t_0 fails both comparisons and takes the -x path, exactly as in the original. */
double code(double x) {
    double t_0 = ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
    if (t_0 > -1000000000.0 && t_0 <= 5.0) {
        return 2.30753;
    }
    return 0.0 - x;
}
! Piecewise alternative: t_0 is the residual of the rational approximant;
! both tails (t_0 <= -1e9 or t_0 > 5) return -x, the mid-range returns 2.30753.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = ((2.30753d0 + (x * 0.27061d0)) / (1.0d0 + (x * (0.99229d0 + (x * 0.04481d0))))) - x
if (t_0 <= (-1000000000.0d0)) then
tmp = 0.0d0 - x
else if (t_0 <= 5.0d0) then
tmp = 2.30753d0
else
tmp = 0.0d0 - x
end if
code = tmp
end function
// Piecewise form: t_0 is the residual of the rational approximant.
// Mid-range (-1e9 < t_0 <= 5) returns 2.30753; both tails return -x.
// A NaN t_0 fails both comparisons and takes the -x path, as in the original.
public static double code(double x) {
    double t_0 = ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
    if (t_0 > -1000000000.0 && t_0 <= 5.0) {
        return 2.30753;
    }
    return 0.0 - x;
}
def code(x):
    """Piecewise alternative from the Herbie report.

    Computes the residual t_0 of the rational approximant; both tails
    (t_0 <= -1e9 or t_0 > 5) return -x, the mid-range returns 2.30753.

    Fix: the original report flattened this multi-statement function onto a
    single line, which is not valid Python (SyntaxError); the intended
    multi-line structure (matching the C/Fortran/Java translations) is restored.
    """
    t_0 = ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x
    tmp = 0
    if t_0 <= -1000000000.0:
        tmp = 0.0 - x
    elif t_0 <= 5.0:
        tmp = 2.30753
    else:
        tmp = 0.0 - x
    return tmp
# Piecewise alternative: t_0 is the residual of the rational approximant; both tails
# return -x, the mid-range returns 2.30753. Reformatted onto multiple lines: the
# flattened single-line form juxtaposes statements without separators and does not parse.
function code(x)
    t_0 = Float64(Float64(Float64(2.30753 + Float64(x * 0.27061)) / Float64(1.0 + Float64(x * Float64(0.99229 + Float64(x * 0.04481))))) - x)
    tmp = 0.0
    if (t_0 <= -1000000000.0)
        tmp = Float64(0.0 - x)
    elseif (t_0 <= 5.0)
        tmp = 2.30753
    else
        tmp = Float64(0.0 - x)
    end
    return tmp
end
% Piecewise alternative: t_0 is the residual of the rational approximant; both tails
% return -x, the mid-range returns 2.30753. Reformatted onto multiple lines: the
% flattened single-line form (header and statements fused) is not valid MATLAB syntax.
function tmp_2 = code(x)
	t_0 = ((2.30753 + (x * 0.27061)) / (1.0 + (x * (0.99229 + (x * 0.04481))))) - x;
	if (t_0 <= -1000000000.0)
		tmp = 0.0 - x;
	elseif (t_0 <= 5.0)
		tmp = 2.30753;
	else
		tmp = 0.0 - x;
	end
	tmp_2 = tmp;
end
(* Piecewise alternative: t$95$0 is the residual of the rational approximant; both tails return -x, the mid-range returns 2.30753. *)
code[x_] := Block[{t$95$0 = N[(N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(x * N[(0.99229 + N[(x * 0.04481), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]}, If[LessEqual[t$95$0, -1000000000.0], N[(0.0 - x), $MachinePrecision], If[LessEqual[t$95$0, 5.0], 2.30753, N[(0.0 - x), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2.30753 + x \cdot 0.27061}{1 + x \cdot \left(0.99229 + x \cdot 0.04481\right)} - x\\
\mathbf{if}\;t_0 \leq -1000000000:\\
\;\;\;\;0 - x\\
\mathbf{elif}\;t_0 \leq 5:\\
\;\;\;\;2.30753\\
\mathbf{else}:\\
\;\;\;\;0 - x\\
\end{array}
\end{array}
if (-.f64 (/.f64 (+.f64 #s(literal 230753/100000 binary64) (*.f64 x #s(literal 27061/100000 binary64))) (+.f64 #s(literal 1 binary64) (*.f64 x (+.f64 #s(literal 99229/100000 binary64) (*.f64 x #s(literal 4481/100000 binary64)))))) x) < -1e9 or 5 < (-.f64 (/.f64 (+.f64 #s(literal 230753/100000 binary64) (*.f64 x #s(literal 27061/100000 binary64))) (+.f64 #s(literal 1 binary64) (*.f64 x (+.f64 #s(literal 99229/100000 binary64) (*.f64 x #s(literal 4481/100000 binary64)))))) x) Initial program 100.0%
Taylor expanded in x around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.9
Simplified99.9%
sub0-negN/A
neg-lowering-neg.f6499.9
Applied egg-rr99.9%
if -1e9 < (-.f64 (/.f64 (+.f64 #s(literal 230753/100000 binary64) (*.f64 x #s(literal 27061/100000 binary64))) (+.f64 #s(literal 1 binary64) (*.f64 x (+.f64 #s(literal 99229/100000 binary64) (*.f64 x #s(literal 4481/100000 binary64)))))) x) < 5
Initial program 100.0%
Taylor expanded in x around 0
Simplified97.6%
Final simplification98.7%
; Alternative: reciprocal form with the denominator folded into a single nested fma chain.
(FPCore (x) :precision binary64 (- (/ (/ 1.0 (fma x (fma x 0.04481 0.99229) 1.0)) (/ 1.0 (fma x 0.27061 2.30753))) x))
/* Reciprocal form (1/Q)/(1/P) - x with Q evaluated as one nested fma chain (C translation). */
double code(double x) {
return ((1.0 / fma(x, fma(x, 0.04481, 0.99229), 1.0)) / (1.0 / fma(x, 0.27061, 2.30753))) - x;
}
# Reciprocal form with nested fma denominator; Float64(...) wrappers pin binary64 rounding per step.
function code(x) return Float64(Float64(Float64(1.0 / fma(x, fma(x, 0.04481, 0.99229), 1.0)) / Float64(1.0 / fma(x, 0.27061, 2.30753))) - x) end
(* Reciprocal form with nested multiply-add denominator, machine-precision rounding per step. *)
code[x_] := N[(N[(N[(1.0 / N[(x * N[(x * 0.04481 + 0.99229), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[(1.0 / N[(x * 0.27061 + 2.30753), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.04481, 0.99229\right), 1\right)}}{\frac{1}{\mathsf{fma}\left(x, 0.27061, 2.30753\right)}} - x
\end{array}
Initial program 100.0%
clear-numN/A
associate-/r/N/A
flip-+N/A
clear-numN/A
clear-numN/A
flip-+N/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64100.0
Applied egg-rr100.0%
; Alternative: direct ratio of the two fma-evaluated polynomials, minus x.
(FPCore (x) :precision binary64 (- (/ (fma x 0.27061 2.30753) (fma x (fma x 0.04481 0.99229) 1.0)) x))
double code(double x) {
return (fma(x, 0.27061, 2.30753) / fma(x, fma(x, 0.04481, 0.99229), 1.0)) - x;
}
# Direct ratio of fma-evaluated polynomials minus x; Float64(...) pins binary64 rounding per step.
function code(x) return Float64(Float64(fma(x, 0.27061, 2.30753) / fma(x, fma(x, 0.04481, 0.99229), 1.0)) - x) end
(* Direct ratio of multiply-add polynomials minus x, machine-precision rounding per step. *)
code[x_] := N[(N[(N[(x * 0.27061 + 2.30753), $MachinePrecision] / N[(x * N[(x * 0.04481 + 0.99229), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, 0.27061, 2.30753\right)}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.04481, 0.99229\right), 1\right)} - x
\end{array}
Initial program 100.0%
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64100.0
Applied egg-rr100.0%
; Alternative: denominator truncated to its linear term and evaluated with fma.
(FPCore (x) :precision binary64 (- (/ (+ 2.30753 (* x 0.27061)) (fma x 0.99229 1.0)) x))
double code(double x) {
return ((2.30753 + (x * 0.27061)) / fma(x, 0.99229, 1.0)) - x;
}
# Truncated-denominator form P(x) / fma(x, 0.99229, 1.0) - x; Float64(...) pins binary64 rounding.
function code(x) return Float64(Float64(Float64(2.30753 + Float64(x * 0.27061)) / fma(x, 0.99229, 1.0)) - x) end
(* Truncated-denominator form, machine-precision rounding per step. *)
code[x_] := N[(N[(N[(2.30753 + N[(x * 0.27061), $MachinePrecision]), $MachinePrecision] / N[(x * 0.99229 + 1.0), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l}
\\
\frac{2.30753 + x \cdot 0.27061}{\mathsf{fma}\left(x, 0.99229, 1\right)} - x
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
metadata-evalN/A
lft-mult-inverseN/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-eval99.0
Simplified99.0%
; Alternative: series truncation to the linear form 2.30753 - x.
(FPCore (x) :precision binary64 (- 2.30753 x))
/* Linear truncation of the approximant: constant term minus x. */
double code(double x) {
    double result = 2.30753 - x;
    return result;
}
! Linear truncation of the approximant: constant term minus x (Fortran translation).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.30753d0 - x
end function
// Linear truncation of the approximant: constant term minus x (Java translation).
public static double code(double x) {
return 2.30753 - x;
}
def code(x):
    """Linear truncation of the approximant: the constant term minus x."""
    residual = 2.30753 - x
    return residual
# Linear truncation: constant term minus x, rounded to Float64.
function code(x) return Float64(2.30753 - x) end
% Linear truncation: constant term minus x (MATLAB translation).
% Reformatted onto multiple lines: the flattened single-line form is not valid MATLAB syntax.
function tmp = code(x)
	tmp = 2.30753 - x;
end
(* Linear truncation: constant term minus x at machine precision. *)
code[x_] := N[(2.30753 - x), $MachinePrecision]
\begin{array}{l}
\\
2.30753 - x
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified98.2%
; Alternative: constant term only (degree-0 truncation).
(FPCore (x) :precision binary64 2.30753)
/* Degree-0 truncation: the constant 2.30753 regardless of x. */
double code(double x) {
    (void)x; /* unused by design in this truncated alternative */
    return 2.30753;
}
! Degree-0 truncation: the constant 2.30753 regardless of x (Fortran translation).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.30753d0
end function
// Degree-0 truncation: the constant 2.30753 regardless of x (Java translation).
public static double code(double x) {
return 2.30753;
}
def code(x):
    """Degree-0 truncation: always returns the constant 2.30753; x is ignored by design."""
    constant_term = 2.30753
    return constant_term
# Degree-0 truncation: always returns the constant 2.30753.
function code(x) return 2.30753 end
% Degree-0 truncation: always returns the constant 2.30753 (MATLAB translation).
% Reformatted onto multiple lines: the flattened single-line form is not valid MATLAB syntax.
function tmp = code(x)
	tmp = 2.30753;
end
(* Degree-0 truncation: always the constant 2.30753. *)
code[x_] := 2.30753
\begin{array}{l}
\\
2.30753
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
Simplified52.5%
herbie shell --seed 2024196
; Original problem statement for this Herbie run (seed recorded above):
; the invIncompleteGamma rational approximant from math-functions-0.1.5.2, C backend.
(FPCore (x)
:name "Numeric.SpecFunctions:invIncompleteGamma from math-functions-0.1.5.2, C"
:precision binary64
(- (/ (+ 2.30753 (* x 0.27061)) (+ 1.0 (* x (+ 0.99229 (* x 0.04481))))) x))