
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
!> cosh(x) * (sin(y) / y); NaN when y == 0.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: ratio
ratio = sin(y) / y
code = ratio * cosh(x)
end function
/** Returns cosh(x) * (sin(y) / y); NaN when y == 0. */
public static double code(double x, double y) {
    final double ratio = Math.sin(y) / y;
    return ratio * Math.cosh(x);
}
def code(x, y):
    # cosh(x) * (sin(y) / y); raises ZeroDivisionError when y == 0.
    ratio = math.sin(y) / y
    return ratio * math.cosh(x)
# cosh(x) * (sin(y) / y), with each step rounded to Float64.
function code(x, y)
    ratio = Float64(sin(y) / y)
    return Float64(cosh(x) * ratio)
end
% cosh(x) * (sin(y) / y); NaN when y == 0.
function tmp = code(x, y)
    ratio = sin(y) / y;
    tmp = cosh(x) * ratio;
end
(* cosh(x) * (sin(y)/y), with every intermediate rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
/* Direct translation of the FPCore above: cosh(x) * (sin(y) / y).
   NaN when y == 0 (0.0/0.0). */
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
! Direct translation of the FPCore above: cosh(x) * (sin(y) / y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x) * (sin(y) / y)
end function
/** Direct translation of the FPCore above: cosh(x) * (sin(y) / y). */
public static double code(double x, double y) {
return Math.cosh(x) * (Math.sin(y) / y);
}
# Direct translation: cosh(x) * (sin(y) / y); raises ZeroDivisionError at y == 0.
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
# Direct translation: cosh(x) * (sin(y) / y), rounded to Float64 at each step.
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
% Direct translation: cosh(x) * (sin(y) / y).
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
(* cosh(x) * (sin(y)/y), each intermediate rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
/* Same formula as the original program: cosh(x) * (sin(y) / y). */
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
! Same formula as the original program: cosh(x) * (sin(y) / y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x) * (sin(y) / y)
end function
/** Same formula as the original program: cosh(x) * (sin(y) / y). */
public static double code(double x, double y) {
return Math.cosh(x) * (Math.sin(y) / y);
}
# Same formula as the original program: cosh(x) * (sin(y) / y).
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
# Same formula as the original program: cosh(x) * (sin(y) / y).
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
% Same formula as the original program: cosh(x) * (sin(y) / y).
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
(* Same formula as the original program, rounded per step to $MachinePrecision. *)
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
Initial program 99.9%
Final simplification 99.9%
(FPCore (x y) :precision binary64 (if (<= (cosh x) 1.0) (/ (sin y) y) (cosh x)))
double code(double x, double y) {
double tmp;
if (cosh(x) <= 1.0) {
tmp = sin(y) / y;
} else {
tmp = cosh(x);
}
return tmp;
}
!> sin(y)/y when cosh(x) <= 1, otherwise cosh(x).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (cosh(x) <= 1.0d0) then
code = sin(y) / y
else
code = cosh(x)
end if
end function
/** Returns sin(y)/y when cosh(x) <= 1.0, otherwise cosh(x). */
public static double code(double x, double y) {
    return (Math.cosh(x) <= 1.0) ? (Math.sin(y) / y) : Math.cosh(x);
}
def code(x, y):
    """Return sin(y)/y when cosh(x) <= 1.0, otherwise cosh(x).

    Fix: the generated single-line form flattened an if/else statement
    onto the ``def`` line, which is not valid Python syntax.
    """
    if math.cosh(x) <= 1.0:
        tmp = math.sin(y) / y
    else:
        tmp = math.cosh(x)
    return tmp
# Return sin(y)/y when cosh(x) <= 1.0, otherwise cosh(x).
# Fix: the generated single-line form had no statement separator after
# `tmp = 0.0`, so it did not parse; reformatted onto separate lines.
function code(x, y)
    tmp = 0.0
    if cosh(x) <= 1.0
        tmp = Float64(sin(y) / y)
    else
        tmp = cosh(x)
    end
    return tmp
end
% sin(y)/y when cosh(x) <= 1, otherwise cosh(x).
function tmp_2 = code(x, y)
    if (cosh(x) <= 1.0)
        tmp_2 = sin(y) / y;
    else
        tmp_2 = cosh(x);
    end
end
(* If cosh(x) <= 1, evaluate sin(y)/y; otherwise cosh(x). *)
code[x_, y_] := If[LessEqual[N[Cosh[x], $MachinePrecision], 1.0], N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision], N[Cosh[x], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cosh x \leq 1:\\
\;\;\;\;\frac{\sin y}{y}\\
\mathbf{else}:\\
\;\;\;\;\cosh x\\
\end{array}
\end{array}
if (cosh.f64 x) < 1: Initial program 99.7%
*-commutative 99.7%
associate-*l/ 99.7%
associate-/l* 99.6%
Simplified 99.6%
Taylor expanded in x around 0 99.7%
if 1 < (cosh.f64 x): Initial program 100.0%
Taylor expanded in y around 0 75.6%
Final simplification 87.8%
(FPCore (x y) :precision binary64 (cosh x))
double code(double x, double y) {
return cosh(x);
}
! cosh(x); y is unused but kept for a uniform two-argument interface.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x)
end function
/** Returns cosh(x); the y argument is ignored. */
public static double code(double x, double y) {
    final double result = Math.cosh(x);
    return result;
}
def code(x, y):
    # cosh(x); the y argument is unused.
    return math.cosh(x)
# cosh(x); y is unused.
function code(x, y)
    cosh(x)
end
% cosh(x); y is unused.
function tmp = code(x, y) tmp = cosh(x); end
(* cosh(x) at machine precision; y is unused. *)
code[x_, y_] := N[Cosh[x], $MachinePrecision]
\begin{array}{l}
\\
\cosh x
\end{array}
Initial program 99.9%
Taylor expanded in y around 0 61.6%
Final simplification 61.6%
(FPCore (x y) :precision binary64 (/ 1.0 (* y (+ (* y 0.16666666666666666) (/ 1.0 y)))))
/* 1 / (y * (y * 1/6 + 1/y)); x is unused. Division by zero at y == 0. */
double code(double x, double y) {
    (void) x;
    const double inner = (y * 0.16666666666666666) + (1.0 / y);
    return 1.0 / (y * inner);
}
! 1 / (y * (y * 0.1666... + 1/y)); x is unused.
! 0.16666666666666666 is the closest double to 1/6.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 / (y * ((y * 0.16666666666666666d0) + (1.0d0 / y)))
end function
/** Returns 1 / (y * (y * 1/6 + 1/y)); x is ignored. */
public static double code(double x, double y) {
    final double inner = (y * 0.16666666666666666) + (1.0 / y);
    return 1.0 / (y * inner);
}
def code(x, y):
    # 1 / (y * (y * 1/6 + 1/y)); x is unused. ZeroDivisionError at y == 0.
    inner = (y * 0.16666666666666666) + (1.0 / y)
    return 1.0 / (y * inner)
# 1 / (y * (y * 0.1666... + 1/y)); x is unused.
function code(x, y) return Float64(1.0 / Float64(y * Float64(Float64(y * 0.16666666666666666) + Float64(1.0 / y)))) end
% 1 / (y * (y * 0.1666... + 1/y)); x is unused.
function tmp = code(x, y) tmp = 1.0 / (y * ((y * 0.16666666666666666) + (1.0 / y))); end
(* 1 / (y * (y * 0.1666... + 1/y)); x is unused. *)
code[x_, y_] := N[(1.0 / N[(y * N[(N[(y * 0.16666666666666666), $MachinePrecision] + N[(1.0 / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{y \cdot \left(y \cdot 0.16666666666666666 + \frac{1}{y}\right)}
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
associate-*r/ 99.9%
clear-num 99.9%
Applied egg-rr 99.9%
clear-num 99.8%
associate-/r/ 99.8%
associate-/r* 99.8%
Applied egg-rr 99.8%
Taylor expanded in x around 0 51.7%
Taylor expanded in y around 0 26.2%
Final simplification 26.2%
(FPCore (x y) :precision binary64 1.0)
/* Constant 1.0; both arguments are ignored. */
double code(double x, double y) {
return 1.0;
}
! Constant 1.0; both arguments are ignored.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0
end function
/** Constant 1.0; both arguments are ignored. */
public static double code(double x, double y) {
return 1.0;
}
# Constant 1.0; both arguments are ignored.
def code(x, y): return 1.0
# Constant 1.0; both arguments are ignored.
function code(x, y) return 1.0 end
% Constant 1.0; both arguments are ignored.
function tmp = code(x, y) tmp = 1.0; end
(* Constant 1.0; both arguments are ignored. *)
code[x_, y_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 51.7%
Taylor expanded in y around 0 25.5%
Final simplification 25.5%
(FPCore (x y) :precision binary64 (/ (* (cosh x) (sin y)) y))
double code(double x, double y) {
return (cosh(x) * sin(y)) / y;
}
! (cosh(x) * sin(y)) / y — same product as the original, regrouped.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (cosh(x) * sin(y)) / y
end function
/** Returns (cosh(x) * sin(y)) / y — numerator grouped before the division. */
public static double code(double x, double y) {
    final double numerator = Math.cosh(x) * Math.sin(y);
    return numerator / y;
}
def code(x, y):
    # (cosh(x) * sin(y)) / y; raises ZeroDivisionError when y == 0.
    numerator = math.cosh(x) * math.sin(y)
    return numerator / y
# (cosh(x) * sin(y)) / y, rounded to Float64 at each step.
function code(x, y) return Float64(Float64(cosh(x) * sin(y)) / y) end
% (cosh(x) * sin(y)) / y.
function tmp = code(x, y) tmp = (cosh(x) * sin(y)) / y; end
(* (cosh(x) * sin(y)) / y, each operation rounded to $MachinePrecision. *)
code[x_, y_] := N[(N[(N[Cosh[x], $MachinePrecision] * N[Sin[y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{\cosh x \cdot \sin y}{y}
\end{array}
herbie shell --seed 2024062
(FPCore (x y)
:name "Linear.Quaternion:$csinh from linear-1.19.1.3"
:precision binary64
:herbie-target
(/ (* (cosh x) (sin y)) y)
(* (cosh x) (/ (sin y) y)))