
(FPCore (x y) :precision binary64 (* (sin x) (/ (sinh y) y)))
double code(double x, double y) {
return sin(x) * (sinh(y) / y);
}
! Computes sin(x) * (sinh(y) / y) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: ratio
ratio = sinh(y) / y
code = sin(x) * ratio
end function
/** Computes sin(x) * (sinh(y) / y) in double precision. */
public static double code(double x, double y) {
    final double ratio = Math.sinh(y) / y;
    return Math.sin(x) * ratio;
}
def code(x, y):
    """Return sin(x) * (sinh(y) / y) in double precision."""
    ratio = math.sinh(y) / y
    return math.sin(x) * ratio
# sin(x) * (sinh(y) / y), rounded to Float64 at each step.
function code(x, y)
    ratio = Float64(sinh(y) / y)
    return Float64(sin(x) * ratio)
end
% sin(x) * (sinh(y) / y).
function tmp = code(x, y) tmp = sin(x) * (sinh(y) / y); end
(* sin(x) * (sinh(y) / y), each operation rounded to machine precision. *)
code[x_, y_] := N[(N[Sin[x], $MachinePrecision] * N[(N[Sinh[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin x \cdot \frac{\sinh y}{y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (* (sin x) (/ (sinh y) y)))
// sin(x) * (sinh(y) / y) in double precision.
double code(double x, double y) {
return sin(x) * (sinh(y) / y);
}
! sin(x) * (sinh(y) / y) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = sin(x) * (sinh(y) / y)
end function
// sin(x) * (sinh(y) / y) in double precision.
public static double code(double x, double y) {
return Math.sin(x) * (Math.sinh(y) / y);
}
# sin(x) * (sinh(y) / y).
def code(x, y): return math.sin(x) * (math.sinh(y) / y)
# sin(x) * (sinh(y) / y), rounded to Float64 at each step.
function code(x, y) return Float64(sin(x) * Float64(sinh(y) / y)) end
% sin(x) * (sinh(y) / y).
function tmp = code(x, y) tmp = sin(x) * (sinh(y) / y); end
(* sin(x) * (sinh(y) / y), each operation rounded to machine precision. *)
code[x_, y_] := N[(N[Sin[x], $MachinePrecision] * N[(N[Sinh[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin x \cdot \frac{\sinh y}{y}
\end{array}
(FPCore (x y) :precision binary64 (/ (sin x) (/ y (sinh y))))
// sin(x) / (y / sinh(y)) — algebraically equal to sin(x) * (sinh(y) / y).
double code(double x, double y) {
return sin(x) / (y / sinh(y));
}
! sin(x) / (y / sinh(y)) — algebraically equal to sin(x) * (sinh(y) / y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = sin(x) / (y / sinh(y))
end function
// sin(x) / (y / sinh(y)) — algebraically equal to sin(x) * (sinh(y) / y).
public static double code(double x, double y) {
return Math.sin(x) / (y / Math.sinh(y));
}
# sin(x) / (y / sinh(y)).
def code(x, y): return math.sin(x) / (y / math.sinh(y))
# sin(x) / (y / sinh(y)), rounded to Float64 at each step.
function code(x, y) return Float64(sin(x) / Float64(y / sinh(y))) end
% sin(x) / (y / sinh(y)).
function tmp = code(x, y) tmp = sin(x) / (y / sinh(y)); end
(* sin(x) / (y / sinh(y)), each operation rounded to machine precision. *)
code[x_, y_] := N[(N[Sin[x], $MachinePrecision] / N[(y / N[Sinh[y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin x}{\frac{y}{\sinh y}}
\end{array}
Initial program 100.0%
add-log-exp 73.8%
*-un-lft-identity 73.8%
log-prod 73.8%
metadata-eval 73.8%
add-log-exp 100.0%
Applied egg-rr 100.0%
+-lft-identity 100.0%
associate-*r/ 85.8%
associate-*l/ 90.2%
associate-/r/ 100.0%
Simplified 100.0%
(FPCore (x y) :precision binary64 (* (sin x) (/ (sinh y) y)))
// sin(x) * (sinh(y) / y) in double precision.
double code(double x, double y) {
return sin(x) * (sinh(y) / y);
}
! sin(x) * (sinh(y) / y) in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = sin(x) * (sinh(y) / y)
end function
// sin(x) * (sinh(y) / y) in double precision.
public static double code(double x, double y) {
return Math.sin(x) * (Math.sinh(y) / y);
}
# sin(x) * (sinh(y) / y).
def code(x, y): return math.sin(x) * (math.sinh(y) / y)
# sin(x) * (sinh(y) / y), rounded to Float64 at each step.
function code(x, y) return Float64(sin(x) * Float64(sinh(y) / y)) end
% sin(x) * (sinh(y) / y).
function tmp = code(x, y) tmp = sin(x) * (sinh(y) / y); end
(* sin(x) * (sinh(y) / y), each operation rounded to machine precision. *)
code[x_, y_] := N[(N[Sin[x], $MachinePrecision] * N[(N[Sinh[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin x \cdot \frac{\sinh y}{y}
\end{array}
Initial program 100.0%
(FPCore (x y) :precision binary64 (if (<= y 1450.0) (sin x) (* x (/ (sinh y) y))))
// Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y).
double code(double x, double y) {
double tmp;
if (y <= 1450.0) {
tmp = sin(x);
} else {
tmp = x * (sinh(y) / y);
}
return tmp;
}
! Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (y <= 1450.0d0) then
tmp = sin(x)
else
tmp = x * (sinh(y) / y)
end if
code = tmp
end function
// Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y).
public static double code(double x, double y) {
double tmp;
if (y <= 1450.0) {
tmp = Math.sin(x);
} else {
tmp = x * (Math.sinh(y) / y);
}
return tmp;
}
def code(x, y):
    """Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y).

    Note: for y > 1450.0, math.sinh(y) exceeds the double range, so the
    else branch raises OverflowError (math.sinh raises on range error).
    """
    # The original flattened one-liner was not valid Python; this is the
    # same logic laid out on separate lines.
    tmp = 0
    if y <= 1450.0:
        tmp = math.sin(x)
    else:
        tmp = x * (math.sinh(y) / y)
    return tmp
# Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y).
# The original flattened one-liner fused statements without separators and
# was not valid Julia; this is the same logic on separate lines.
function code(x, y)
    tmp = 0.0
    if (y <= 1450.0)
        tmp = sin(x)
    else
        tmp = Float64(x * Float64(sinh(y) / y))
    end
    return tmp
end
% Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y).
% The original flattened one-liner lacked statement separators after the
% function declaration and if-condition; this is the same logic laid out.
function tmp_2 = code(x, y)
    tmp = 0.0;
    if (y <= 1450.0)
        tmp = sin(x);
    else
        tmp = x * (sinh(y) / y);
    end
    tmp_2 = tmp;
end
(* Piecewise: sin(x) when y <= 1450.0; otherwise x * (sinh(y) / y). *)
code[x_, y_] := If[LessEqual[y, 1450.0], N[Sin[x], $MachinePrecision], N[(x * N[(N[Sinh[y], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1450:\\
\;\;\;\;\sin x\\
\mathbf{else}:\\
\;\;\;\;x \cdot \frac{\sinh y}{y}\\
\end{array}
\end{array}
if y < 1450: Initial program 100.0%
Taylor expanded in y around 0 61.6%
if 1450 < y: Initial program 100.0%
Taylor expanded in x around 0 65.5%
(FPCore (x y) :precision binary64 (if (<= y 540.0) (sin x) (* x (+ 1.0 (* (* x x) -0.16666666666666666)))))
// Piecewise: sin(x) when y <= 540.0; otherwise the cubic x * (1 + x*x * (-1/6)).
double code(double x, double y) {
double tmp;
if (y <= 540.0) {
tmp = sin(x);
} else {
tmp = x * (1.0 + ((x * x) * -0.16666666666666666));
}
return tmp;
}
! Piecewise: sin(x) when y <= 540.0; otherwise the cubic x * (1 + x*x * (-1/6)).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
if (y <= 540.0d0) then
tmp = sin(x)
else
tmp = x * (1.0d0 + ((x * x) * (-0.16666666666666666d0)))
end if
code = tmp
end function
// Piecewise: sin(x) when y <= 540.0; otherwise the cubic x * (1 + x*x * (-1/6)).
public static double code(double x, double y) {
double tmp;
if (y <= 540.0) {
tmp = Math.sin(x);
} else {
tmp = x * (1.0 + ((x * x) * -0.16666666666666666));
}
return tmp;
}
def code(x, y):
    """Piecewise: sin(x) when y <= 540.0; otherwise the cubic
    x * (1.0 + (x * x) * -0.16666666666666666)."""
    # The original flattened one-liner was not valid Python; this is the
    # same logic laid out on separate lines.
    tmp = 0
    if y <= 540.0:
        tmp = math.sin(x)
    else:
        tmp = x * (1.0 + ((x * x) * -0.16666666666666666))
    return tmp
# Piecewise: sin(x) when y <= 540.0; otherwise the cubic
# x * (1.0 + (x * x) * -0.16666666666666666).
# The original flattened one-liner fused statements without separators and
# was not valid Julia; this is the same logic on separate lines.
function code(x, y)
    tmp = 0.0
    if (y <= 540.0)
        tmp = sin(x)
    else
        tmp = Float64(x * Float64(1.0 + Float64(Float64(x * x) * -0.16666666666666666)))
    end
    return tmp
end
% Piecewise: sin(x) when y <= 540.0; otherwise the cubic
% x * (1.0 + (x * x) * -0.16666666666666666).
% The original flattened one-liner lacked statement separators after the
% function declaration and if-condition; this is the same logic laid out.
function tmp_2 = code(x, y)
    tmp = 0.0;
    if (y <= 540.0)
        tmp = sin(x);
    else
        tmp = x * (1.0 + ((x * x) * -0.16666666666666666));
    end
    tmp_2 = tmp;
end
(* Piecewise: sin(x) when y <= 540.0; otherwise the cubic x * (1 + x*x * (-1/6)). *)
code[x_, y_] := If[LessEqual[y, 540.0], N[Sin[x], $MachinePrecision], N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 540:\\
\;\;\;\;\sin x\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(1 + \left(x \cdot x\right) \cdot -0.16666666666666666\right)\\
\end{array}
\end{array}
if y < 540: Initial program 100.0%
Taylor expanded in y around 0 61.6%
if 540 < y: Initial program 100.0%
Taylor expanded in y around 0 2.7%
Taylor expanded in x around 0 27.0%
*-commutative 27.0%
Simplified 27.0%
unpow2 27.0%
Applied egg-rr 27.0%
(FPCore (x y) :precision binary64 (+ x (* -0.16666666666666666 (* x (* x x)))))
// x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused.
double code(double x, double y) {
return x + (-0.16666666666666666 * (x * (x * x)));
}
! x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x + ((-0.16666666666666666d0) * (x * (x * x)))
end function
// x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused.
public static double code(double x, double y) {
return x + (-0.16666666666666666 * (x * (x * x)));
}
# x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused.
def code(x, y): return x + (-0.16666666666666666 * (x * (x * x)))
# x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused.
function code(x, y) return Float64(x + Float64(-0.16666666666666666 * Float64(x * Float64(x * x)))) end
% x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused.
function tmp = code(x, y) tmp = x + (-0.16666666666666666 * (x * (x * x))); end
(* x + (-1/6) * x^3 — cubic Taylor form of sin(x) about 0; y is unused. *)
code[x_, y_] := N[(x + N[(-0.16666666666666666 * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -0.16666666666666666 \cdot \left(x \cdot \left(x \cdot x\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in y around 0 48.9%
Taylor expanded in x around 0 40.8%
distribute-rgt-in 40.8%
*-lft-identity 40.8%
associate-*l* 40.8%
pow-plus 40.8%
metadata-eval 40.8%
Simplified 40.8%
unpow3 40.8%
unpow2 40.8%
Applied egg-rr 40.8%
unpow2 40.8%
Applied egg-rr 40.8%
Final simplification 40.8%
(FPCore (x y) :precision binary64 (* x (+ 1.0 (* (* x x) -0.16666666666666666))))
// x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused.
double code(double x, double y) {
return x * (1.0 + ((x * x) * -0.16666666666666666));
}
! x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * (1.0d0 + ((x * x) * (-0.16666666666666666d0)))
end function
// x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused.
public static double code(double x, double y) {
return x * (1.0 + ((x * x) * -0.16666666666666666));
}
# x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused.
def code(x, y): return x * (1.0 + ((x * x) * -0.16666666666666666))
# x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused.
function code(x, y) return Float64(x * Float64(1.0 + Float64(Float64(x * x) * -0.16666666666666666))) end
% x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused.
function tmp = code(x, y) tmp = x * (1.0 + ((x * x) * -0.16666666666666666)); end
(* x * (1 + (x * x) * (-1/6)) — factored cubic; y is unused. *)
code[x_, y_] := N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + \left(x \cdot x\right) \cdot -0.16666666666666666\right)
\end{array}
Initial program 100.0%
Taylor expanded in y around 0 48.9%
Taylor expanded in x around 0 40.8%
*-commutative 40.8%
Simplified 40.8%
unpow2 40.8%
Applied egg-rr 40.8%
(FPCore (x y) :precision binary64 x)
// Returns x unchanged; y is unused.
double code(double x, double y) {
return x;
}
! Returns x unchanged; y is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x
end function
// Returns x unchanged; y is unused.
public static double code(double x, double y) {
return x;
}
# Returns x unchanged; y is unused.
def code(x, y): return x
# Returns x unchanged; y is unused.
function code(x, y) return x end
% Returns x unchanged; y is unused.
function tmp = code(x, y) tmp = x; end
(* Returns x unchanged; y is unused. *)
code[x_, y_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 64.7%
Taylor expanded in y around 0 30.0%
herbie shell --seed 2024133
(FPCore (x y)
:name "Linear.Quaternion:$ccos from linear-1.19.1.3"
:precision binary64
(* (sin x) (/ (sinh y) y)))