
; Initial program: x * (1 - x*y), evaluated in IEEE binary64.
(FPCore (x y) :precision binary64 (* x (- 1.0 (* x y))))
double code(double x, double y) {
    /* Evaluate x * (1 - x*y) in double precision. */
    const double xy = x * y;
    return x * (1.0 - xy);
}
real(8) function code(x, y)
    ! Evaluate x * (1 - x*y) in double precision.
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x * (1.0d0 - x * y)
end function
public static double code(double x, double y) {
    // Evaluate x * (1 - x*y) in double precision.
    final double xy = x * y;
    return x * (1.0 - xy);
}
def code(x, y):
    """Evaluate x * (1 - x*y) in double precision."""
    xy = x * y
    return x * (1.0 - xy)
function code(x, y)
    # x * (1 - x*y); each intermediate is explicitly rounded to Float64
    xy = Float64(x * y)
    diff = Float64(1.0 - xy)
    return Float64(x * diff)
end
function tmp = code(x, y)
    % Evaluate x * (1 - x*y) in double precision.
    xy = x * y;
    tmp = x * (1.0 - xy);
end
(* Machine-precision evaluation of x * (1 - x*y). *)
code[x_, y_] := N[(x * N[(1.0 - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 - x \cdot y\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: unchanged from the input — x * (1 - x*y) in binary64.
(FPCore (x y) :precision binary64 (* x (- 1.0 (* x y))))
double code(double x, double y) {
    /* Evaluate x * (1 - x*y) in double precision. */
    const double xy = x * y;
    return x * (1.0 - xy);
}
real(8) function code(x, y)
    ! Evaluate x * (1 - x*y) in double precision.
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x * (1.0d0 - x * y)
end function
public static double code(double x, double y) {
    // Evaluate x * (1 - x*y) in double precision.
    final double xy = x * y;
    return x * (1.0 - xy);
}
def code(x, y):
    """Evaluate x * (1 - x*y) in double precision."""
    xy = x * y
    return x * (1.0 - xy)
function code(x, y)
    # x * (1 - x*y); each intermediate is explicitly rounded to Float64
    xy = Float64(x * y)
    diff = Float64(1.0 - xy)
    return Float64(x * diff)
end
function tmp = code(x, y)
    % Evaluate x * (1 - x*y) in double precision.
    xy = x * y;
    tmp = x * (1.0 - xy);
end
(* Machine-precision evaluation of x * (1 - x*y). *)
code[x_, y_] := N[(x * N[(1.0 - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 - x \cdot y\right)
\end{array}
; Alternative 2: x / (-1 / fma(x, y, -1)) — algebraically x * (1 - x*y),
; with the inner product x*y - 1 computed by a fused multiply-add.
(FPCore (x y) :precision binary64 (/ x (/ -1.0 (fma x y -1.0))))
double code(double x, double y) {
return x / (-1.0 / fma(x, y, -1.0));
}
function code(x, y)
    # x / (-1 / fma(x, y, -1)); fma computes x*y - 1 with one rounding
    fused = fma(x, y, -1.0)
    inv = Float64(-1.0 / fused)
    return Float64(x / inv)
end
(* Machine-precision evaluation of x / (-1 / (x*y - 1)). *)
code[x_, y_] := N[(x / N[(-1.0 / N[(x * y + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{\frac{-1}{\mathsf{fma}\left(x, y, -1\right)}}
\end{array}
Initial program: 99.9% accuracy
Applied egg-rr: 69.3%
  clear-num: N/A
  un-div-inv: N/A
  /-lowering-/.f64: N/A
  clear-num: N/A
  neg-sub0: N/A
  +-commutative: N/A
  associate--r+: N/A
  metadata-eval: N/A
  metadata-eval: N/A
  associate-*r*: N/A
  swap-sqr: N/A
  +-commutative: N/A
  flip--: N/A
  metadata-eval: N/A
  *-rgt-identity: N/A
  frac-2neg: N/A
Applied egg-rr: 99.9%
; Alternative: identical to the input — x * (1 - x*y) in binary64.
(FPCore (x y) :precision binary64 (* x (- 1.0 (* x y))))
double code(double x, double y) {
    /* Evaluate x * (1 - x*y) in double precision. */
    const double xy = x * y;
    return x * (1.0 - xy);
}
real(8) function code(x, y)
    ! Evaluate x * (1 - x*y) in double precision.
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x * (1.0d0 - x * y)
end function
public static double code(double x, double y) {
    // Evaluate x * (1 - x*y) in double precision.
    final double xy = x * y;
    return x * (1.0 - xy);
}
def code(x, y):
    """Evaluate x * (1 - x*y) in double precision."""
    xy = x * y
    return x * (1.0 - xy)
function code(x, y)
    # x * (1 - x*y); each intermediate is explicitly rounded to Float64
    xy = Float64(x * y)
    diff = Float64(1.0 - xy)
    return Float64(x * diff)
end
function tmp = code(x, y)
    % Evaluate x * (1 - x*y) in double precision.
    xy = x * y;
    tmp = x * (1.0 - xy);
end
(* Machine-precision evaluation of x * (1 - x*y). *)
code[x_, y_] := N[(x * N[(1.0 - N[(x * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 - x \cdot y\right)
\end{array}
Initial program 99.9%
; Truncated alternative: just x (first-order Taylor term; y is dropped).
(FPCore (x y) :precision binary64 x)
double code(double x, double y) {
    /* Truncated alternative: return x unchanged; y is intentionally unused. */
    (void) y;
    return x;
}
real(8) function code(x, y)
    ! Truncated alternative: return x unchanged; y is intentionally unused.
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x
end function
public static double code(double x, double y) {
    // Truncated alternative: return x unchanged; y is intentionally unused.
    return x;
}
def code(x, y):
    """Truncated alternative: return x unchanged; y is intentionally unused."""
    return x
function code(x, y)
    # Truncated alternative: return x unchanged; y is intentionally unused.
    return x
end
function tmp = code(x, y)
    % Truncated alternative: return x unchanged; y is intentionally unused.
    tmp = x;
end
(* Truncated alternative: identity in x; y is unused. *)
code[x_, y_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
Simplified: 52.4%
herbie shell --seed 2024196
; Herbie input program: x * (1 - x*y) in binary64.
(FPCore (x y)
:name "Numeric.SpecFunctions:log1p from math-functions-0.1.5.2, A"
:precision binary64
(* x (- 1.0 (* x y))))