
; Herbie input (binary64): x * (sin(y) / y).
(FPCore (x y) :precision binary64 (* x (/ (sin y) y)))
double code(double x, double y) {
return x * (sin(y) / y);
}
real(8) function code(x, y)
    ! Herbie-generated: returns x * (sin(y) / y) in real(8).
    ! Added implicit none so undeclared names cannot slip in.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (sin(y) / y)
end function
public static double code(double x, double y) {
    // x times the ratio sin(y)/y, preserving the original's rounding order.
    final double ratio = Math.sin(y) / y;
    return x * ratio;
}
def code(x, y):
    """Return x * (sin(y) / y), with the division rounded before the product."""
    ratio = math.sin(y) / y
    return x * ratio
function code(x, y)
    # x times sin(y)/y, with the original's explicit Float64 roundings.
    ratio = Float64(sin(y) / y)
    return Float64(x * ratio)
end
% Herbie-generated MATLAB: x * (sin(y) / y).
function tmp = code(x, y) tmp = x * (sin(y) / y); end
(* Herbie-generated Mathematica: x * (sin(y)/y), each step rounded to $MachinePrecision. *)
code[x_, y_] := N[(x * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{\sin y}{y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative (same as input): x * (sin(y) / y) in binary64.
(FPCore (x y) :precision binary64 (* x (/ (sin y) y)))
/* Herbie-generated C: x * (sin(y) / y) in binary64. */
double code(double x, double y) {
return x * (sin(y) / y);
}
real(8) function code(x, y)
    ! Herbie-generated: returns x * (sin(y) / y) in real(8).
    ! Added implicit none so undeclared names cannot slip in.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (sin(y) / y)
end function
// Herbie-generated Java: x * (Math.sin(y) / y) in double precision.
public static double code(double x, double y) {
return x * (Math.sin(y) / y);
}
# Herbie-generated Python: x * (sin(y) / y) in double precision.
def code(x, y): return x * (math.sin(y) / y)
# Herbie-generated Julia: x * (sin(y) / y) with explicit Float64 roundings.
function code(x, y) return Float64(x * Float64(sin(y) / y)) end
% Herbie-generated MATLAB: x * (sin(y) / y).
function tmp = code(x, y) tmp = x * (sin(y) / y); end
(* Herbie-generated Mathematica: x * (sin(y)/y), each step rounded to $MachinePrecision. *)
code[x_, y_] := N[(x * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{\sin y}{y}
\end{array}
; Alternative: reciprocal form x / (y / sin(y)) in binary64.
(FPCore (x y) :precision binary64 (/ x (/ y (sin y))))
double code(double x, double y) {
return x / (y / sin(y));
}
real(8) function code(x, y)
    ! Herbie alternative: returns x / (y / sin(y)) in real(8).
    ! Added implicit none so undeclared names cannot slip in.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x / (y / sin(y))
end function
// Herbie alternative in Java: x / (y / Math.sin(y)).
public static double code(double x, double y) {
return x / (y / Math.sin(y));
}
# Herbie alternative in Python: x / (y / sin(y)).
def code(x, y): return x / (y / math.sin(y))
# Herbie alternative in Julia: x / (y / sin(y)) with explicit Float64 roundings.
function code(x, y) return Float64(x / Float64(y / sin(y))) end
% Herbie alternative in MATLAB: x / (y / sin(y)).
function tmp = code(x, y) tmp = x / (y / sin(y)); end
(* Herbie alternative in Mathematica: x / (y / sin(y)) at machine precision. *)
code[x_, y_] := N[(x / N[(y / N[Sin[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{\frac{y}{\sin y}}
\end{array}
Initial program 99.8%
lift-sin.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.8
Applied rewrites99.8%
; Alternative (identical to input): x * (sin(y) / y) in binary64.
(FPCore (x y) :precision binary64 (* x (/ (sin y) y)))
/* Herbie-generated C: x * (sin(y) / y) in binary64. */
double code(double x, double y) {
return x * (sin(y) / y);
}
real(8) function code(x, y)
    ! Herbie-generated: returns x * (sin(y) / y) in real(8).
    ! Added implicit none so undeclared names cannot slip in.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x * (sin(y) / y)
end function
// Herbie-generated Java: x * (Math.sin(y) / y) in double precision.
public static double code(double x, double y) {
return x * (Math.sin(y) / y);
}
# Herbie-generated Python: x * (sin(y) / y) in double precision.
def code(x, y): return x * (math.sin(y) / y)
# Herbie-generated Julia: x * (sin(y) / y) with explicit Float64 roundings.
function code(x, y) return Float64(x * Float64(sin(y) / y)) end
% Herbie-generated MATLAB: x * (sin(y) / y).
function tmp = code(x, y) tmp = x * (sin(y) / y); end
(* Herbie-generated Mathematica: x * (sin(y)/y), each step rounded to $MachinePrecision. *)
code[x_, y_] := N[(x * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{\sin y}{y}
\end{array}
Initial program 99.8%
; Alternative: two-regime form — fma-based Taylor polynomial for y <= 1.3e7, asymptotic form above.
(FPCore (x y) :precision binary64 (if (<= y 13000000.0) (* x (fma y (* y -0.16666666666666666) 1.0)) (/ x (* 0.16666666666666666 (* y y)))))
double code(double x, double y) {
double tmp;
if (y <= 13000000.0) {
tmp = x * fma(y, (y * -0.16666666666666666), 1.0);
} else {
tmp = x / (0.16666666666666666 * (y * y));
}
return tmp;
}
function code(x, y)
    # Two-regime form: fma-based quadratic below the 1.3e7 split, asymptotic x/(y^2/6) above.
    if y <= 13000000.0
        return Float64(x * fma(y, Float64(y * -0.16666666666666666), 1.0))
    else
        return Float64(x / Float64(0.16666666666666666 * Float64(y * y)))
    end
end
(* Herbie alternative: branch at y = 1.3e7 between an fma-style quadratic and x / (y^2/6). *)
code[x_, y_] := If[LessEqual[y, 13000000.0], N[(x * N[(y * N[(y * -0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(x / N[(0.16666666666666666 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 13000000:\\
\;\;\;\;x \cdot \mathsf{fma}\left(y, y \cdot -0.16666666666666666, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{0.16666666666666666 \cdot \left(y \cdot y\right)}\\
\end{array}
\end{array}
if y < 1.3e7: Initial program 99.8%
Taylor expanded in y around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6464.8
Applied rewrites64.8%
if 1.3e7 < y Initial program 99.6%
lift-sin.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
Taylor expanded in y around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6425.5
Applied rewrites25.5%
Taylor expanded in y around inf
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6425.5
Applied rewrites25.5%
Final simplification54.4%
; Alternative: branch at y = 1.3e7 between the fma quadratic and x * (6 / y^2).
(FPCore (x y) :precision binary64 (if (<= y 13000000.0) (* x (fma y (* y -0.16666666666666666) 1.0)) (* x (/ 6.0 (* y y)))))
double code(double x, double y) {
double tmp;
if (y <= 13000000.0) {
tmp = x * fma(y, (y * -0.16666666666666666), 1.0);
} else {
tmp = x * (6.0 / (y * y));
}
return tmp;
}
# Herbie alternative: fma-based quadratic below the 1.3e7 split, x * (6 / y^2) above.
function code(x, y) tmp = 0.0 if (y <= 13000000.0) tmp = Float64(x * fma(y, Float64(y * -0.16666666666666666), 1.0)); else tmp = Float64(x * Float64(6.0 / Float64(y * y))); end return tmp end
(* Herbie alternative: branch at y = 1.3e7 between the fma-style quadratic and x * (6 / y^2). *)
code[x_, y_] := If[LessEqual[y, 13000000.0], N[(x * N[(y * N[(y * -0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(x * N[(6.0 / N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 13000000:\\
\;\;\;\;x \cdot \mathsf{fma}\left(y, y \cdot -0.16666666666666666, 1\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \frac{6}{y \cdot y}\\
\end{array}
\end{array}
if y < 1.3e7: Initial program 99.8%
Taylor expanded in y around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6464.8
Applied rewrites64.8%
if 1.3e7 < y Initial program 99.6%
lift-sin.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
Taylor expanded in y around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6425.5
Applied rewrites25.5%
Taylor expanded in y around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f6425.5
Applied rewrites25.5%
; Alternative: x divided by the fma-evaluated denominator 1 + y^2/6.
(FPCore (x y) :precision binary64 (/ x (fma y (* y 0.16666666666666666) 1.0)))
double code(double x, double y) {
return x / fma(y, (y * 0.16666666666666666), 1.0);
}
# Herbie alternative: x / (1 + y^2/6) with the denominator evaluated via fma.
function code(x, y) return Float64(x / fma(y, Float64(y * 0.16666666666666666), 1.0)) end
(* Herbie alternative: x / (1 + y^2/6) at machine precision. *)
code[x_, y_] := N[(x / N[(y * N[(y * 0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{\mathsf{fma}\left(y, y \cdot 0.16666666666666666, 1\right)}
\end{array}
Initial program 99.8%
lift-sin.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.8
Applied rewrites99.8%
Taylor expanded in y around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6458.7
Applied rewrites58.7%
; Alternative: drop the sin(y)/y factor entirely and return x.
(FPCore (x y) :precision binary64 x)
/* Herbie alternative: drop the sin(y)/y factor entirely and return x. */
double code(double x, double y) {
return x;
}
real(8) function code(x, y)
    ! Herbie alternative: drop the sin(y)/y factor entirely and return x.
    ! Added implicit none so undeclared names cannot slip in.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = x
end function
// Herbie alternative: drop the sin(y)/y factor entirely and return x.
public static double code(double x, double y) {
return x;
}
# Herbie alternative: drop the sin(y)/y factor entirely and return x.
def code(x, y): return x
# Herbie alternative: drop the sin(y)/y factor entirely and return x.
function code(x, y) return x end
% Herbie alternative: drop the sin(y)/y factor entirely and return x.
function tmp = code(x, y) tmp = x; end
(* Herbie alternative: drop the sin(y)/y factor entirely and return x. *)
code[x_, y_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 99.8%
Taylor expanded in y around 0
Applied rewrites49.0%
*-rgt-identity49.0
Applied rewrites49.0%
herbie shell --seed 2024219
; Herbie shell input (seed 2024219 above): the original expression x * (sin(y) / y).
(FPCore (x y)
:name "Linear.Quaternion:$cexp from linear-1.19.1.3"
:precision binary64
(* x (/ (sin y) y)))