
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: sinc_y
    ! sinc factor first, then scale by cosh(x); same binary64 arithmetic.
    sinc_y = sin(y) / y
    code = cosh(x) * sinc_y
end function
public static double code(double x, double y) {
    // sinc factor first, then scale by cosh(x); same double rounding order.
    final double sincY = Math.sin(y) / y;
    return Math.cosh(x) * sincY;
}
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
/* Duplicate listing of the original expression: cosh(x) * (sin(y)/y). */
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
! Duplicate listing: cosh(x) times the sinc ratio sin(y)/y, in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x) * (sin(y) / y)
end function
// Duplicate listing: cosh(x) * (sin(y)/y).
public static double code(double x, double y) {
return Math.cosh(x) * (Math.sin(y) / y);
}
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
(FPCore (x y) :precision binary64 (/ (sin y) (/ y (cosh x))))
double code(double x, double y) {
return sin(y) / (y / cosh(x));
}
! Rewritten form: sin(y) / (y / cosh(x)) — algebraically cosh(x)*sin(y)/y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = sin(y) / (y / cosh(x))
end function
// Rewritten form: sin(y) / (y / cosh(x)).
public static double code(double x, double y) {
return Math.sin(y) / (y / Math.cosh(x));
}
def code(x, y): return math.sin(y) / (y / math.cosh(x))
function code(x, y) return Float64(sin(y) / Float64(y / cosh(x))) end
function tmp = code(x, y) tmp = sin(y) / (y / cosh(x)); end
code[x_, y_] := N[(N[Sin[y], $MachinePrecision] / N[(y / N[Cosh[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin y}{\frac{y}{\cosh x}}
\end{array}
Initial program 99.9%
add-sqr-sqrt 99.9%
pow2 99.9%
Applied egg-rr 99.9%
unpow2 99.9%
add-sqr-sqrt 99.9%
associate-*r/ 99.9%
associate-*l/ 99.8%
clear-num 99.8%
associate-*l/ 99.9%
*-un-lft-identity 99.9%
Applied egg-rr 99.9%
(FPCore (x y) :precision binary64 (if (<= (cosh x) 1.1) (/ (sin y) y) (cosh x)))
double code(double x, double y) {
double tmp;
if (cosh(x) <= 1.1) {
tmp = sin(y) / y;
} else {
tmp = cosh(x);
}
return tmp;
}
! Piecewise approximation: sinc ratio sin(y)/y when cosh(x) <= 1.1
! (x near 0), otherwise cosh(x) alone (the sinc factor is dropped).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (cosh(x) <= 1.1d0) then
tmp = sin(y) / y
else
tmp = cosh(x)
end if
code = tmp
end function
public static double code(double x, double y) {
    // Piecewise: sinc(y) when cosh(x) <= 1.1 (x near 0), otherwise cosh(x).
    // Math.cosh is pure, so hoisting it does not change the result.
    final double cx = Math.cosh(x);
    return (cx <= 1.1) ? Math.sin(y) / y : cx;
}
def code(x, y): tmp = 0 if math.cosh(x) <= 1.1: tmp = math.sin(y) / y else: tmp = math.cosh(x) return tmp
function code(x, y) tmp = 0.0 if (cosh(x) <= 1.1) tmp = Float64(sin(y) / y); else tmp = cosh(x); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (cosh(x) <= 1.1) tmp = sin(y) / y; else tmp = cosh(x); end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[N[Cosh[x], $MachinePrecision], 1.1], N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision], N[Cosh[x], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cosh x \leq 1.1:\\
\;\;\;\;\frac{\sin y}{y}\\
\mathbf{else}:\\
\;\;\;\;\cosh x\\
\end{array}
\end{array}
if (cosh.f64 x) < 1.1000000000000001:
  Initial program 99.8%
  Taylor expanded in x around 0 97.1%
if 1.1000000000000001 < (cosh.f64 x):
  Initial program 100.0%
  Taylor expanded in y around 0 65.3%
Final simplification 81.7%
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
/* Duplicate listing of the original expression: cosh(x) * (sin(y)/y). */
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
! Duplicate listing: cosh(x) times the sinc ratio sin(y)/y, in binary64.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x) * (sin(y) / y)
end function
// Duplicate listing: cosh(x) * (sin(y)/y).
public static double code(double x, double y) {
return Math.cosh(x) * (Math.sin(y) / y);
}
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
Initial program 99.9%
(FPCore (x y) :precision binary64 (if (<= y 6.1e+106) (cosh x) (* (* y (+ 1.0 (* -0.16666666666666666 (* y y)))) (/ 1.0 y))))
/* Regime split on y (NOTE(review): for y <= 6.1e+106 the sin(y)/y factor is
   dropped entirely and cosh(x) returned — presumably an accuracy-driven
   branch from the rewriting tool; verify against the report before reuse).
   The other branch is the cubic Taylor sinc y*(1 - y^2/6) times 1/y. */
double code(double x, double y) {
double tmp;
if (y <= 6.1e+106) {
tmp = cosh(x);
} else {
tmp = (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y);
}
return tmp;
}
! Regime split on y: cosh(x) for y <= 6.1d+106 (sinc factor dropped —
! NOTE(review): accuracy-driven branch from the rewriting tool; verify),
! else the cubic Taylor sinc y*(1 - y**2/6) times 1/y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (y <= 6.1d+106) then
tmp = cosh(x)
else
tmp = (y * (1.0d0 + ((-0.16666666666666666d0) * (y * y)))) * (1.0d0 / y)
end if
code = tmp
end function
// Regime split on y: cosh(x) for y <= 6.1e+106 (sinc factor dropped —
// NOTE(review): accuracy-driven branch; verify against the report),
// else the cubic Taylor sinc y*(1 - y*y/6) times 1/y.
public static double code(double x, double y) {
double tmp;
if (y <= 6.1e+106) {
tmp = Math.cosh(x);
} else {
tmp = (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y);
}
return tmp;
}
def code(x, y): tmp = 0 if y <= 6.1e+106: tmp = math.cosh(x) else: tmp = (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y) return tmp
function code(x, y) tmp = 0.0 if (y <= 6.1e+106) tmp = cosh(x); else tmp = Float64(Float64(y * Float64(1.0 + Float64(-0.16666666666666666 * Float64(y * y)))) * Float64(1.0 / y)); end return tmp end
function tmp_2 = code(x, y) tmp = 0.0; if (y <= 6.1e+106) tmp = cosh(x); else tmp = (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y); end tmp_2 = tmp; end
code[x_, y_] := If[LessEqual[y, 6.1e+106], N[Cosh[x], $MachinePrecision], N[(N[(y * N[(1.0 + N[(-0.16666666666666666 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 6.1 \cdot 10^{+106}:\\
\;\;\;\;\cosh x\\
\mathbf{else}:\\
\;\;\;\;\left(y \cdot \left(1 + -0.16666666666666666 \cdot \left(y \cdot y\right)\right)\right) \cdot \frac{1}{y}\\
\end{array}
\end{array}
if y < 6.10000000000000001e106:
  Initial program 99.9%
  Taylor expanded in y around 0 61.8%
if 6.10000000000000001e106 < y:
  Initial program 99.8%
  *-commutative 99.8%
  associate-*l/ 99.8%
  associate-/l* 99.8%
  Simplified 99.8%
  Taylor expanded in x around 0 52.3%
  Taylor expanded in y around 0 24.6%
  unpow2 24.6%
  Applied egg-rr 24.6%
Final simplification 56.3%
(FPCore (x y) :precision binary64 (* (* y (+ 1.0 (* -0.16666666666666666 (* y y)))) (/ 1.0 y)))
double code(double x, double y) {
    /* Cubic Taylor sinc y*(1 - y*y/6) multiplied by 1/y; x is unused in
       this variant.  Operation order matches the original exactly. */
    (void) x;
    double y_sq = y * y;
    double poly = 1.0 + (-0.16666666666666666 * y_sq);
    return (y * poly) * (1.0 / y);
}
! Cubic Taylor sinc y*(1 - y**2/6) times 1/y; x is unused in this variant.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (y * (1.0d0 + ((-0.16666666666666666d0) * (y * y)))) * (1.0d0 / y)
end function
// Cubic Taylor sinc y*(1 - y*y/6) times 1/y; x is unused in this variant.
public static double code(double x, double y) {
return (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y);
}
def code(x, y): return (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y)
function code(x, y) return Float64(Float64(y * Float64(1.0 + Float64(-0.16666666666666666 * Float64(y * y)))) * Float64(1.0 / y)) end
function tmp = code(x, y) tmp = (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y); end
code[x_, y_] := N[(N[(y * N[(1.0 + N[(-0.16666666666666666 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(y \cdot \left(1 + -0.16666666666666666 \cdot \left(y \cdot y\right)\right)\right) \cdot \frac{1}{y}
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 51.3%
Taylor expanded in y around 0 36.1%
unpow2 36.1%
Applied egg-rr 36.1%
(FPCore (x y) :precision binary64 (+ 1.0 (* -0.16666666666666666 (* y y))))
double code(double x, double y) {
    /* Quadratic Taylor term of sinc: 1 - y*y/6; x is unused in this variant. */
    (void) x;
    double y_sq = y * y;
    return 1.0 + (-0.16666666666666666 * y_sq);
}
! Quadratic Taylor term of sinc: 1 - y**2/6; x is unused in this variant.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 + ((-0.16666666666666666d0) * (y * y))
end function
// Quadratic Taylor term of sinc: 1 - y*y/6; x is unused in this variant.
public static double code(double x, double y) {
return 1.0 + (-0.16666666666666666 * (y * y));
}
def code(x, y): return 1.0 + (-0.16666666666666666 * (y * y))
function code(x, y) return Float64(1.0 + Float64(-0.16666666666666666 * Float64(y * y))) end
function tmp = code(x, y) tmp = 1.0 + (-0.16666666666666666 * (y * y)); end
code[x_, y_] := N[(1.0 + N[(-0.16666666666666666 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + -0.16666666666666666 \cdot \left(y \cdot y\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 51.3%
Taylor expanded in y around 0 33.4%
*-commutative 33.4%
Simplified 33.4%
unpow2 36.1%
Applied egg-rr 33.4%
Final simplification 33.4%
(FPCore (x y) :precision binary64 1.0)
double code(double x, double y) {
    /* Constant approximation: both arguments are intentionally unused. */
    (void) x;
    (void) y;
    return 1.0;
}
! Constant approximation: both arguments are intentionally unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0
end function
// Constant approximation: both arguments are intentionally unused.
public static double code(double x, double y) {
return 1.0;
}
def code(x, y): return 1.0
function code(x, y) return 1.0 end
function tmp = code(x, y) tmp = 1.0; end
code[x_, y_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 51.3%
Taylor expanded in y around 0 25.6%
Taylor expanded in y around 0 25.7%
(FPCore (x y) :precision binary64 (/ (* (cosh x) (sin y)) y))
double code(double x, double y) {
return (cosh(x) * sin(y)) / y;
}
! Reassociated form: (cosh(x) * sin(y)) / y — one division at the end.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (cosh(x) * sin(y)) / y
end function
// Reassociated form: (cosh(x) * sin(y)) / y — one division at the end.
public static double code(double x, double y) {
return (Math.cosh(x) * Math.sin(y)) / y;
}
def code(x, y): return (math.cosh(x) * math.sin(y)) / y
function code(x, y) return Float64(Float64(cosh(x) * sin(y)) / y) end
function tmp = code(x, y) tmp = (cosh(x) * sin(y)) / y; end
code[x_, y_] := N[(N[(N[Cosh[x], $MachinePrecision] * N[Sin[y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{\cosh x \cdot \sin y}{y}
\end{array}
herbie shell --seed 2024110
(FPCore (x y)
:name "Linear.Quaternion:$csinh from linear-1.19.1.3"
:precision binary64
:alt
(/ (* (cosh x) (sin y)) y)
(* (cosh x) (/ (sin y) y)))