
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
! Evaluates cosh(x) * (sin(y)/y) in double precision.
! The sinc factor is computed into a local first; rounding order is unchanged.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  real(8) :: sinc
  sinc = sin(y) / y
  code = cosh(x) * sinc
end function
/** Returns cosh(x) * (sin(y)/y); the sinc factor is named, rounding order unchanged. */
public static double code(double x, double y) {
    final double sinc = Math.sin(y) / y;
    return Math.cosh(x) * sinc;
}
def code(x, y):
    """Return cosh(x) * (sin(y) / y) in double precision (same rounding order)."""
    sinc = math.sin(y) / y
    return math.cosh(x) * sinc
# cosh(x) * (sin(y)/y) with the same Float64 rounding as the original form.
function code(x, y)
    sinc_y = Float64(sin(y) / y)
    return Float64(cosh(x) * sinc_y)
end
% cosh(x) * (sin(y)/y); the sinc factor is computed first (identical result).
function tmp = code(x, y)
  s = sin(y) / y;
  tmp = cosh(x) * s;
end
(* cosh(x) * (sin(y)/y); each subexpression is rounded to $MachinePrecision via N. *)
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x) * (sin(y) / y)
end function
public static double code(double x, double y) {
return Math.cosh(x) * (Math.sin(y) / y);
}
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
(FPCore (x y) :precision binary64 (* (cosh x) (/ (sin y) y)))
double code(double x, double y) {
return cosh(x) * (sin(y) / y);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x) * (sin(y) / y)
end function
public static double code(double x, double y) {
return Math.cosh(x) * (Math.sin(y) / y);
}
def code(x, y): return math.cosh(x) * (math.sin(y) / y)
function code(x, y) return Float64(cosh(x) * Float64(sin(y) / y)) end
function tmp = code(x, y) tmp = cosh(x) * (sin(y) / y); end
code[x_, y_] := N[(N[Cosh[x], $MachinePrecision] * N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cosh x \cdot \frac{\sin y}{y}
\end{array}
Initial program 99.9%
(FPCore (x y) :precision binary64 (if (<= (cosh x) 1.00000001) (/ (sin y) y) (cosh x)))
double code(double x, double y) {
double tmp;
if (cosh(x) <= 1.00000001) {
tmp = sin(y) / y;
} else {
tmp = cosh(x);
}
return tmp;
}
! Branch form: when cosh(x) is within 1e-8 of 1 (x near 0) return
! sin(y)/y; otherwise the cosh factor dominates and sinc(y) is dropped.
real(8) function code(x, y)
  real(8), intent (in) :: x
  real(8), intent (in) :: y
  if (cosh(x) <= 1.00000001d0) then
    code = sin(y) / y
  else
    code = cosh(x)
  end if
end function
/** Branch form: sinc(y) while cosh(x) <= 1.00000001 (x near 0), else cosh(x). */
public static double code(double x, double y) {
    return (Math.cosh(x) <= 1.00000001)
        ? Math.sin(y) / y
        : Math.cosh(x);
}
def code(x, y):
    """Branch form of cosh(x) * (sin(y)/y).

    When cosh(x) <= 1.00000001 (x near 0) only the sinc factor matters,
    so return sin(y)/y; otherwise the cosh factor dominates.
    (Fix: the generated body had been collapsed onto one line, which is
    invalid Python syntax; restored the multi-line structure.)
    """
    tmp = 0
    if math.cosh(x) <= 1.00000001:
        tmp = math.sin(y) / y
    else:
        tmp = math.cosh(x)
    return tmp
# Branch form of cosh(x) * (sin(y)/y): sinc(y) when cosh(x) <= 1.00000001
# (x near 0), otherwise cosh(x).
# Fix: statements had been collapsed onto one line without separators
# ("tmp = 0.0 if ..." is invalid Julia); restored the line structure.
function code(x, y)
    tmp = 0.0
    if (cosh(x) <= 1.00000001)
        tmp = Float64(sin(y) / y)
    else
        tmp = cosh(x)
    end
    return tmp
end
% Branch form: sinc(y) when cosh(x) is within 1e-8 of 1 (x near 0), else cosh(x).
function tmp_2 = code(x, y)
  if (cosh(x) <= 1.00000001)
    tmp_2 = sin(y) / y;
  else
    tmp_2 = cosh(x);
  end
end
(* Branch form: sinc(y) when Cosh[x] <= 1.00000001 (x near 0), otherwise Cosh[x]. *)
code[x_, y_] := If[LessEqual[N[Cosh[x], $MachinePrecision], 1.00000001], N[(N[Sin[y], $MachinePrecision] / y), $MachinePrecision], N[Cosh[x], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cosh x \leq 1.00000001:\\
\;\;\;\;\frac{\sin y}{y}\\
\mathbf{else}:\\
\;\;\;\;\cosh x\\
\end{array}
\end{array}
if (cosh.f64 x) < 1.0000000099999999: Initial program 99.8%
Taylor expanded in x around 0 99.6%
if 1.0000000099999999 < (cosh.f64 x): Initial program 100.0%
Taylor expanded in y around 0 75.9%
*-rgt-identity 75.9%
add-log-exp 75.9%
*-un-lft-identity 75.9%
log-prod 75.9%
metadata-eval 75.9%
add-log-exp 75.9%
Applied egg-rr 75.9%
+-lft-identity 75.9%
Simplified 75.9%
(FPCore (x y) :precision binary64 (cosh x))
/* Dominant-term approximation: for x away from 0 the cosh factor dwarfs
   sinc(y), so the sinc factor is dropped entirely. */
double code(double x, double y) {
return cosh(x);
}
! Dominant-term approximation: returns cosh(x); the sinc(y) factor is dropped.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = cosh(x)
end function
/** Dominant-term approximation: returns cosh(x); the sinc(y) factor is dropped. */
public static double code(double x, double y) {
return Math.cosh(x);
}
def code(x, y):
    """Dominant-term approximation: cosh(x) only; the sinc(y) factor is dropped."""
    return math.cosh(x)
function code(x, y) return cosh(x) end
function tmp = code(x, y) tmp = cosh(x); end
code[x_, y_] := N[Cosh[x], $MachinePrecision]
\begin{array}{l}
\\
\cosh x
\end{array}
Initial program 99.9%
Taylor expanded in y around 0 64.7%
*-rgt-identity 64.7%
add-log-exp 64.7%
*-un-lft-identity 64.7%
log-prod 64.7%
metadata-eval 64.7%
add-log-exp 64.7%
Applied egg-rr 64.7%
+-lft-identity 64.7%
Simplified 64.7%
(FPCore (x y) :precision binary64 (* (* y (+ 1.0 (* -0.16666666666666666 (* y y)))) (/ 1.0 y)))
/* Taylor form of sinc(y): (y * (1 - y^2/6)) * (1/y), with named
   intermediates. Operation order — and thus rounding — is unchanged. */
double code(double x, double y) {
    double y_sq = y * y;
    double cubic = y * (1.0 + (-0.16666666666666666 * y_sq));
    return cubic * (1.0 / y);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (y * (1.0d0 + ((-0.16666666666666666d0) * (y * y)))) * (1.0d0 / y)
end function
public static double code(double x, double y) {
return (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y);
}
def code(x, y):
    """Taylor form of sinc(y): (y * (1 - y**2/6)) * (1/y), same rounding order."""
    y_sq = y * y
    cubic = y * (1.0 + (-0.16666666666666666 * y_sq))
    return cubic * (1.0 / y)
function code(x, y) return Float64(Float64(y * Float64(1.0 + Float64(-0.16666666666666666 * Float64(y * y)))) * Float64(1.0 / y)) end
function tmp = code(x, y) tmp = (y * (1.0 + (-0.16666666666666666 * (y * y)))) * (1.0 / y); end
code[x_, y_] := N[(N[(y * N[(1.0 + N[(-0.16666666666666666 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(y \cdot \left(1 + -0.16666666666666666 \cdot \left(y \cdot y\right)\right)\right) \cdot \frac{1}{y}
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 47.9%
Taylor expanded in y around 0 33.4%
unpow2 33.4%
Applied egg-rr 33.4%
(FPCore (x y) :precision binary64 (+ 1.0 (* -0.16666666666666666 (* y y))))
/* Degree-2 Taylor polynomial of sinc(y): 1 - y^2/6 (coefficient named). */
double code(double x, double y) {
    const double c = -0.16666666666666666;
    return 1.0 + c * (y * y);
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0 + ((-0.16666666666666666d0) * (y * y))
end function
public static double code(double x, double y) {
return 1.0 + (-0.16666666666666666 * (y * y));
}
def code(x, y):
    """Degree-2 Taylor polynomial of sinc(y): 1 - y**2 / 6."""
    coeff = -0.16666666666666666
    return 1.0 + coeff * (y * y)
function code(x, y) return Float64(1.0 + Float64(-0.16666666666666666 * Float64(y * y))) end
function tmp = code(x, y) tmp = 1.0 + (-0.16666666666666666 * (y * y)); end
code[x_, y_] := N[(1.0 + N[(-0.16666666666666666 * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + -0.16666666666666666 \cdot \left(y \cdot y\right)
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 47.9%
Taylor expanded in y around 0 31.3%
*-commutative 31.3%
Simplified 31.3%
unpow2 33.4%
Applied egg-rr 31.3%
Final simplification 31.3%
(FPCore (x y) :precision binary64 1.0)
/* Degree-0 approximation: near (0, 0), cosh(x) * sinc(y) rounds to 1.0. */
double code(double x, double y) {
return 1.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 1.0d0
end function
public static double code(double x, double y) {
return 1.0;
}
def code(x, y):
    """Degree-0 approximation: near (0, 0), cosh(x) * sinc(y) rounds to 1.0."""
    return 1.0
function code(x, y) return 1.0 end
function tmp = code(x, y) tmp = 1.0; end
code[x_, y_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 99.9%
*-commutative 99.9%
associate-*l/ 99.9%
associate-/l* 99.8%
Simplified 99.8%
Taylor expanded in x around 0 47.9%
Taylor expanded in y around 0 25.6%
Taylor expanded in y around 0 25.7%
(FPCore (x y) :precision binary64 (/ (* (cosh x) (sin y)) y))
double code(double x, double y) {
return (cosh(x) * sin(y)) / y;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (cosh(x) * sin(y)) / y
end function
public static double code(double x, double y) {
return (Math.cosh(x) * Math.sin(y)) / y;
}
def code(x, y):
    """Reassociated form: (cosh(x) * sin(y)) / y, one division at the end."""
    numerator = math.cosh(x) * math.sin(y)
    return numerator / y
function code(x, y) return Float64(Float64(cosh(x) * sin(y)) / y) end
function tmp = code(x, y) tmp = (cosh(x) * sin(y)) / y; end
code[x_, y_] := N[(N[(N[Cosh[x], $MachinePrecision] * N[Sin[y], $MachinePrecision]), $MachinePrecision] / y), $MachinePrecision]
\begin{array}{l}
\\
\frac{\cosh x \cdot \sin y}{y}
\end{array}
herbie shell --seed 2024116
(FPCore (x y)
:name "Linear.Quaternion:$csinh from linear-1.19.1.3"
:precision binary64
:alt
(! :herbie-platform default (/ (* (cosh x) (sin y)) y))
(* (cosh x) (/ (sin y) y)))