
; Reference expression: sin(x) * sinh(y) / x, evaluated in IEEE binary64.
(FPCore (x y) :precision binary64 (/ (* (sin x) (sinh y)) x))
double code(double x, double y) {
return (sin(x) * sinh(y)) / x;
}
!> sin(x) * sinh(y) / x in double precision (no guard for x == 0).
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (sin(x) * sinh(y)) / x
end function code
/** sin(x) * sinh(y) / x — direct binary64 translation of the FPCore spec. */
public static double code(double x, double y) {
    final double numerator = Math.sin(x) * Math.sinh(y);
    return numerator / x;
}
def code(x, y):
    """Return sin(x) * sinh(y) / x (binary64 reference expression)."""
    numerator = math.sin(x) * math.sinh(y)
    return numerator / x
# sin(x) * sinh(y) / x; Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(Float64(sin(x) * sinh(y)) / x) end
function tmp = code(x, y)
    % sin(x) * sinh(y) / x in double precision.
    tmp = (sin(x) * sinh(y)) / x;
end
(* sin(x) * sinh(y) / x with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(N[(N[Sin[x], $MachinePrecision] * N[Sinh[y], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin x \cdot \sinh y}{x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the reference expression sin(x) * sinh(y) / x.
(FPCore (x y) :precision binary64 (/ (* (sin x) (sinh y)) x))
/* sin(x) * sinh(y) / x — same expression as the reference translation. */
double code(double x, double y) {
return (sin(x) * sinh(y)) / x;
}
!> sin(x) * sinh(y) / x in double precision (same as the reference form).
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (sin(x) * sinh(y)) / x
end function code
// sin(x) * sinh(y) / x — same expression as the reference translation.
public static double code(double x, double y) {
return (Math.sin(x) * Math.sinh(y)) / x;
}
# sin(x) * sinh(y) / x — same expression as the reference translation.
def code(x, y): return (math.sin(x) * math.sinh(y)) / x
# sin(x) * sinh(y) / x; Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(Float64(sin(x) * sinh(y)) / x) end
function tmp = code(x, y)
    % sin(x) * sinh(y) / x in double precision.
    tmp = (sin(x) * sinh(y)) / x;
end
(* sin(x) * sinh(y) / x with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(N[(N[Sin[x], $MachinePrecision] * N[Sinh[y], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin x \cdot \sinh y}{x}
\end{array}
; Reassociated form: (sin(x)/x) * sinh(y) — division folded into the sin factor.
(FPCore (x y) :precision binary64 (* (/ (sin x) x) (sinh y)))
double code(double x, double y) {
return (sin(x) / x) * sinh(y);
}
!> (sin(x)/x) * sinh(y) — reassociated form of sin(x)*sinh(y)/x.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = (sin(x) / x) * sinh(y)
end function code
// (sin(x)/x) * sinh(y) — reassociated form of sin(x)*sinh(y)/x.
public static double code(double x, double y) {
return (Math.sin(x) / x) * Math.sinh(y);
}
def code(x, y):
    """Return (sin(x)/x) * sinh(y) — Herbie's reassociated form."""
    sinc = math.sin(x) / x
    return sinc * math.sinh(y)
# (sin(x)/x) * sinh(y); Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(Float64(sin(x) / x) * sinh(y)) end
function tmp = code(x, y)
    % (sin(x)/x) * sinh(y) — reassociated form of sin(x)*sinh(y)/x.
    tmp = (sin(x) / x) * sinh(y);
end
(* (sin(x)/x) * sinh(y) with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(N[(N[Sin[x], $MachinePrecision] / x), $MachinePrecision] * N[Sinh[y], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin x}{x} \cdot \sinh y
\end{array}
Initial program 91.9%
associate-/l*99.9%
Simplified99.9%
add-log-exp68.3%
*-un-lft-identity68.3%
log-prod68.3%
metadata-eval68.3%
add-log-exp99.9%
Applied egg-rr99.9%
+-lft-identity99.9%
associate-*r/91.9%
associate-*l/99.9%
Simplified99.9%
; Piecewise: for sinh(y) <= 5e-11 use (sin(x)/x) * y; otherwise sinh(y) alone.
(FPCore (x y) :precision binary64 (if (<= (sinh y) 5e-11) (* (/ (sin x) x) y) (sinh y)))
/* Piecewise Herbie rewrite: when sinh(y) <= 5e-11 the code substitutes y for
 * sinh(y) (Taylor in y around 0, per the derivation log) and returns
 * (sin(x)/x) * y.  Otherwise it returns sinh(y) alone — the sin(x)/x factor
 * is dropped in that branch, as generated by the search. */
double code(double x, double y) {
double tmp;
if (sinh(y) <= 5e-11) {
tmp = (sin(x) / x) * y;
} else {
tmp = sinh(y);
}
return tmp;
}
!> Piecewise Herbie rewrite: (sin(x)/x) * y when sinh(y) <= 5e-11 (small-y
!> substitution sinh(y) ~ y, per the derivation log); otherwise sinh(y).
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: tmp
    if (sinh(y) <= 5d-11) then
        tmp = (sin(x) / x) * y
    else
        tmp = sinh(y)
    end if
    code = tmp
end function code
// Piecewise Herbie rewrite: for sinh(y) <= 5e-11 return (sin(x)/x) * y
// (small-y substitution sinh(y) ~ y); otherwise return sinh(y) alone —
// the sin(x)/x factor is dropped in that branch, as generated.
public static double code(double x, double y) {
double tmp;
if (Math.sinh(y) <= 5e-11) {
tmp = (Math.sin(x) / x) * y;
} else {
tmp = Math.sinh(y);
}
return tmp;
}
def code(x, y):
    """Piecewise Herbie rewrite of sin(x)*sinh(y)/x.

    When sinh(y) <= 5e-11 returns (sin(x)/x) * y (small-y substitution
    sinh(y) ~ y); otherwise returns sinh(y) alone.
    """
    # Original export lost its newlines and was a SyntaxError; logic unchanged.
    if math.sinh(y) <= 5e-11:
        tmp = (math.sin(x) / x) * y
    else:
        tmp = math.sinh(y)
    return tmp
# Piecewise Herbie rewrite: (sin(x)/x) * y when sinh(y) <= 5e-11, else sinh(y).
# Original export lost its newlines and did not parse; logic unchanged.
function code(x, y)
    if sinh(y) <= 5e-11
        tmp = Float64(Float64(sin(x) / x) * y)
    else
        tmp = sinh(y)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Piecewise: (sin(x)/x) * y when sinh(y) <= 5e-11, otherwise sinh(y).
    tmp = 0.0;
    if (sinh(y) <= 5e-11)
        tmp = (sin(x) / x) * y;
    else
        tmp = sinh(y);
    end
    tmp_2 = tmp;
end
(* Piecewise: (sin(x)/x) * y when sinh(y) <= 5e-11, otherwise sinh(y). *)
code[x_, y_] := If[LessEqual[N[Sinh[y], $MachinePrecision], 5e-11], N[(N[(N[Sin[x], $MachinePrecision] / x), $MachinePrecision] * y), $MachinePrecision], N[Sinh[y], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\sinh y \leq 5 \cdot 10^{-11}:\\
\;\;\;\;\frac{\sin x}{x} \cdot y\\
\mathbf{else}:\\
\;\;\;\;\sinh y\\
\end{array}
\end{array}
if (sinh.f64 y) < 5.00000000000000018e-11:
Initial program 89.4%
associate-/l*99.8%
Simplified99.8%
Taylor expanded in y around 0 54.9%
associate-/l*65.4%
Simplified65.4%
if 5.00000000000000018e-11 < (sinh.f64 y) Initial program 100.0%
associate-/l*100.0%
Simplified100.0%
Taylor expanded in x around 0 69.0%
clear-num69.0%
un-div-inv69.0%
Applied egg-rr69.0%
associate-/r/69.0%
*-inverses69.0%
*-commutative69.0%
*-rgt-identity69.0%
Simplified69.0%
Final simplification66.3%
; Piecewise: for sinh(y) <= 4e-60 use x / (x / y) (algebraically y); else sinh(y).
(FPCore (x y) :precision binary64 (if (<= (sinh y) 4e-60) (/ x (/ x y)) (sinh y)))
/* Piecewise Herbie rewrite: when sinh(y) <= 4e-60, return x / (x / y) —
 * algebraically y, but evaluated with two binary64 divisions as written.
 * Otherwise return sinh(y). */
double code(double x, double y) {
double tmp;
if (sinh(y) <= 4e-60) {
tmp = x / (x / y);
} else {
tmp = sinh(y);
}
return tmp;
}
!> Piecewise Herbie rewrite: x / (x / y) (algebraically y) when
!> sinh(y) <= 4e-60; otherwise sinh(y).
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: tmp
    if (sinh(y) <= 4d-60) then
        tmp = x / (x / y)
    else
        tmp = sinh(y)
    end if
    code = tmp
end function code
// Piecewise Herbie rewrite: for sinh(y) <= 4e-60 return x / (x / y) —
// algebraically y, evaluated with two divisions as written; else sinh(y).
public static double code(double x, double y) {
double tmp;
if (Math.sinh(y) <= 4e-60) {
tmp = x / (x / y);
} else {
tmp = Math.sinh(y);
}
return tmp;
}
def code(x, y):
    """Piecewise Herbie rewrite of sin(x)*sinh(y)/x.

    When sinh(y) <= 4e-60 returns x / (x / y) (algebraically y); otherwise
    returns sinh(y).
    """
    # Original export lost its newlines and was a SyntaxError; logic unchanged.
    if math.sinh(y) <= 4e-60:
        tmp = x / (x / y)
    else:
        tmp = math.sinh(y)
    return tmp
# Piecewise Herbie rewrite: x / (x / y) (algebraically y) when
# sinh(y) <= 4e-60, else sinh(y).  Original export lost its newlines
# and did not parse; logic unchanged.
function code(x, y)
    if sinh(y) <= 4e-60
        tmp = Float64(x / Float64(x / y))
    else
        tmp = sinh(y)
    end
    return tmp
end
function tmp_2 = code(x, y)
    % Piecewise: x / (x / y) (algebraically y) when sinh(y) <= 4e-60, else sinh(y).
    tmp = 0.0;
    if (sinh(y) <= 4e-60)
        tmp = x / (x / y);
    else
        tmp = sinh(y);
    end
    tmp_2 = tmp;
end
(* Piecewise: x / (x / y) when sinh(y) <= 4e-60, otherwise sinh(y). *)
code[x_, y_] := If[LessEqual[N[Sinh[y], $MachinePrecision], 4e-60], N[(x / N[(x / y), $MachinePrecision]), $MachinePrecision], N[Sinh[y], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\sinh y \leq 4 \cdot 10^{-60}:\\
\;\;\;\;\frac{x}{\frac{x}{y}}\\
\mathbf{else}:\\
\;\;\;\;\sinh y\\
\end{array}
\end{array}
if (sinh.f64 y) < 3.9999999999999999e-60:
Initial program 89.4%
associate-/l*99.8%
Simplified99.8%
clear-num99.3%
un-div-inv99.4%
Applied egg-rr99.4%
Taylor expanded in y around 0 73.5%
Taylor expanded in x around 0 60.3%
if 3.9999999999999999e-60 < (sinh.f64 y) Initial program 98.6%
associate-/l*100.0%
Simplified100.0%
Taylor expanded in x around 0 65.6%
clear-num65.6%
un-div-inv65.6%
Applied egg-rr65.6%
associate-/r/65.6%
*-inverses65.6%
*-commutative65.6%
*-rgt-identity65.6%
Simplified65.6%
; Reassociated form: sin(x) * (sinh(y) / x) — division folded into the sinh factor.
(FPCore (x y) :precision binary64 (* (sin x) (/ (sinh y) x)))
/* sin(x) * (sinh(y) / x) — division folded into the sinh factor. */
double code(double x, double y) {
return sin(x) * (sinh(y) / x);
}
!> sin(x) * (sinh(y) / x) — reassociated form of sin(x)*sinh(y)/x.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = sin(x) * (sinh(y) / x)
end function code
// sin(x) * (sinh(y) / x) — division folded into the sinh factor.
public static double code(double x, double y) {
return Math.sin(x) * (Math.sinh(y) / x);
}
# sin(x) * (sinh(y) / x) — division folded into the sinh factor.
def code(x, y): return math.sin(x) * (math.sinh(y) / x)
# sin(x) * (sinh(y) / x); Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(sin(x) * Float64(sinh(y) / x)) end
function tmp = code(x, y)
    % sin(x) * (sinh(y) / x) — division folded into the sinh factor.
    tmp = sin(x) * (sinh(y) / x);
end
(* sin(x) * (sinh(y) / x) with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(N[Sin[x], $MachinePrecision] * N[(N[Sinh[y], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin x \cdot \frac{\sinh y}{x}
\end{array}
Initial program 91.9%
associate-/l*99.9%
Simplified99.9%
; x * (sinh(y) / x): algebraically sinh(y), evaluated as written in binary64.
(FPCore (x y) :precision binary64 (* x (/ (sinh y) x)))
/* x * (sinh(y) / x) — algebraically sinh(y), evaluated as written. */
double code(double x, double y) {
return x * (sinh(y) / x);
}
!> x * (sinh(y) / x) — algebraically sinh(y), evaluated as written.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x * (sinh(y) / x)
end function code
// x * (sinh(y) / x) — algebraically sinh(y), evaluated as written.
public static double code(double x, double y) {
return x * (Math.sinh(y) / x);
}
# x * (sinh(y) / x) — algebraically sinh(y), evaluated as written.
def code(x, y): return x * (math.sinh(y) / x)
# x * (sinh(y) / x); Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(x * Float64(sinh(y) / x)) end
function tmp = code(x, y)
    % x * (sinh(y) / x) — algebraically sinh(y), evaluated as written.
    tmp = x * (sinh(y) / x);
end
(* x * (sinh(y) / x) with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(x * N[(N[Sinh[y], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{\sinh y}{x}
\end{array}
Initial program 91.9%
associate-/l*99.9%
Simplified99.9%
Taylor expanded in x around 0 74.4%
; x * (y / x): algebraically y (Taylor-collapsed), evaluated as written.
(FPCore (x y) :precision binary64 (* x (/ y x)))
/* x * (y / x) — algebraically y, evaluated as written. */
double code(double x, double y) {
return x * (y / x);
}
!> x * (y / x) — algebraically y, evaluated as written.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x * (y / x)
end function code
// x * (y / x) — algebraically y, evaluated as written.
public static double code(double x, double y) {
return x * (y / x);
}
# x * (y / x) — algebraically y, evaluated as written.
def code(x, y): return x * (y / x)
# x * (y / x); Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(x * Float64(y / x)) end
function tmp = code(x, y)
    % x * (y / x) — algebraically y, evaluated as written.
    tmp = x * (y / x);
end
(* x * (y / x) with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(x * N[(y / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{y}{x}
\end{array}
Initial program 91.9%
associate-/l*99.9%
Simplified99.9%
Taylor expanded in y around 0 62.6%
Taylor expanded in x around 0 50.0%
; Fully collapsed alternative: just y (Taylor in x and y around 0, per the log).
(FPCore (x y) :precision binary64 y)
/* Returns y; x is ignored.  Taylor-collapsed alternative per the log below. */
double code(double x, double y) {
return y;
}
!> Degenerate alternative: returns y.  x is unused but kept in the
!> interface so the function stays a drop-in replacement.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = y
end function code
// Returns y; x is ignored.  Taylor-collapsed alternative per the log below.
public static double code(double x, double y) {
return y;
}
# Returns y; x is ignored.  Taylor-collapsed alternative per the log below.
def code(x, y): return y
# Returns y; x is ignored.
function code(x, y) return y end
function tmp = code(x, y)
    % Returns y; x is ignored (kept for drop-in compatibility).
    tmp = y;
end
(* Returns y; x is ignored. *)
code[x_, y_] := y
\begin{array}{l}
\\
y
\end{array}
Initial program 91.9%
associate-/l*99.9%
Simplified99.9%
Taylor expanded in x around 0 74.4%
Taylor expanded in y around 0 25.1%
; Recommended rewrite (repeat of alternative 5): sin(x) * (sinh(y) / x).
(FPCore (x y) :precision binary64 (* (sin x) (/ (sinh y) x)))
/* sin(x) * (sinh(y) / x) — division folded into the sinh factor. */
double code(double x, double y) {
return sin(x) * (sinh(y) / x);
}
!> sin(x) * (sinh(y) / x) — reassociated form of sin(x)*sinh(y)/x.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = sin(x) * (sinh(y) / x)
end function code
// sin(x) * (sinh(y) / x) — division folded into the sinh factor.
public static double code(double x, double y) {
return Math.sin(x) * (Math.sinh(y) / x);
}
# sin(x) * (sinh(y) / x) — division folded into the sinh factor.
def code(x, y): return math.sin(x) * (math.sinh(y) / x)
# sin(x) * (sinh(y) / x); Float64 wrappers pin each intermediate to binary64.
function code(x, y) return Float64(sin(x) * Float64(sinh(y) / x)) end
function tmp = code(x, y)
    % sin(x) * (sinh(y) / x) — division folded into the sinh factor.
    tmp = sin(x) * (sinh(y) / x);
end
(* sin(x) * (sinh(y) / x) with $MachinePrecision rounding after each operation. *)
code[x_, y_] := N[(N[Sin[x], $MachinePrecision] * N[(N[Sinh[y], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin x \cdot \frac{\sinh y}{x}
\end{array}
herbie shell --seed 2024133
; Closing FPCore: the original expression sin(x)*sinh(y)/x, with Herbie's
; preferred rewrite recorded as an :alt annotation for the default platform.
(FPCore (x y)
:name "Linear.Quaternion:$ccosh from linear-1.19.1.3"
:precision binary64
:alt
(! :herbie-platform default (* (sin x) (/ (sinh y) x)))
(/ (* (sin x) (sinh y)) x))