
(FPCore (x eps) :precision binary64 (- (sin (+ x eps)) (sin x)))
/* Reference implementation: sin(x + eps) - sin(x), computed directly.
 * Subtracting two nearby sines cancels badly when eps is small relative to x
 * (the report samples this at 63.8% accuracy). */
double code(double x, double eps) {
return sin((x + eps)) - sin(x);
}
! Reference implementation: sin(x + eps) - sin(x), computed directly.
! Cancellation-prone when eps is small relative to x.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = sin((x + eps)) - sin(x)
end function
/** Reference implementation: sin(x + eps) - sin(x), computed directly. */
public static double code(double x, double eps) {
return Math.sin((x + eps)) - Math.sin(x);
}
def code(x, eps):
    """Reference implementation: sin(x + eps) - sin(x), computed naively.

    Subtracting two nearby sine values cancels badly when eps is small
    relative to x.
    """
    shifted = math.sin(x + eps)
    base = math.sin(x)
    return shifted - base
# Reference implementation: sin(x + eps) - sin(x), evaluated in Float64.
function code(x, eps) return Float64(sin(Float64(x + eps)) - sin(x)) end
% Reference implementation: sin(x + eps) - sin(x), computed directly.
function tmp = code(x, eps) tmp = sin((x + eps)) - sin(x); end
(* Reference implementation: sin(x + eps) - sin(x), each step rounded via N at $MachinePrecision. *)
code[x_, eps_] := N[(N[Sin[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin \left(x + \varepsilon\right) - \sin x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (sin (+ x eps)) (sin x)))
/* Reference implementation (duplicate listing in the report):
 * sin(x + eps) - sin(x), computed directly. */
double code(double x, double eps) {
return sin((x + eps)) - sin(x);
}
! Reference implementation (duplicate listing in the report):
! sin(x + eps) - sin(x), computed directly.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = sin((x + eps)) - sin(x)
end function
/** Reference implementation (duplicate listing): sin(x + eps) - sin(x). */
public static double code(double x, double eps) {
return Math.sin((x + eps)) - Math.sin(x);
}
def code(x, eps):
    # Naive difference of sines (duplicate listing of the reference form);
    # accuracy degrades when eps is tiny compared with x.
    return math.sin(x + eps) - math.sin(x)
# Reference implementation (duplicate listing), evaluated in Float64.
function code(x, eps) return Float64(sin(Float64(x + eps)) - sin(x)) end
% Reference implementation (duplicate listing): sin(x + eps) - sin(x).
function tmp = code(x, eps) tmp = sin((x + eps)) - sin(x); end
code[x_, eps_] := N[(N[Sin[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin \left(x + \varepsilon\right) - \sin x
\end{array}
(FPCore (x eps) :precision binary64 (* eps (fma (fma eps (* eps -0.16666666666666666) 1.0) (cos x) (* eps (* (sin x) (fma eps (* eps 0.041666666666666664) -0.5))))))
/* Herbie alternative: degree-3 Taylor expansion in eps,
 * eps*((1 - eps^2/6)*cos(x) + eps*(eps^2/24 - 1/2)*sin(x)),
 * evaluated with fused multiply-adds (constants are -1/6 and 1/24). */
double code(double x, double eps) {
return eps * fma(fma(eps, (eps * -0.16666666666666666), 1.0), cos(x), (eps * (sin(x) * fma(eps, (eps * 0.041666666666666664), -0.5))));
}
function code(x, eps) return Float64(eps * fma(fma(eps, Float64(eps * -0.16666666666666666), 1.0), cos(x), Float64(eps * Float64(sin(x) * fma(eps, Float64(eps * 0.041666666666666664), -0.5))))) end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(eps * -0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] * N[Cos[x], $MachinePrecision] + N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(eps * N[(eps * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.16666666666666666, 1\right), \cos x, \varepsilon \cdot \left(\sin x \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\right)\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64 — N/A
+-commutative — N/A
distribute-rgt-in — N/A
associate-+l+ — N/A
*-commutative — N/A
distribute-rgt-in — N/A
Simplified: 99.9%
Final simplification: 99.9%
(FPCore (x eps) :precision binary64 (* eps (fma eps (* (sin x) -0.5) (* (fma eps (* eps -0.16666666666666666) 1.0) (cos x)))))
/* Herbie alternative: eps*(-(eps/2)*sin(x) + (1 - eps^2/6)*cos(x)), via fma. */
double code(double x, double eps) {
return eps * fma(eps, (sin(x) * -0.5), (fma(eps, (eps * -0.16666666666666666), 1.0) * cos(x)));
}
function code(x, eps) return Float64(eps * fma(eps, Float64(sin(x) * -0.5), Float64(fma(eps, Float64(eps * -0.16666666666666666), 1.0) * cos(x)))) end
code[x_, eps_] := N[(eps * N[(eps * N[(N[Sin[x], $MachinePrecision] * -0.5), $MachinePrecision] + N[(N[(eps * N[(eps * -0.16666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \sin x \cdot -0.5, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.16666666666666666, 1\right) \cdot \cos x\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Final simplification99.6%
(FPCore (x eps) :precision binary64 (* eps (fma eps (* (sin x) -0.5) (cos x))))
/* Herbie alternative: eps*(cos(x) - (eps/2)*sin(x)) — second order in eps. */
double code(double x, double eps) {
return eps * fma(eps, (sin(x) * -0.5), cos(x));
}
function code(x, eps) return Float64(eps * fma(eps, Float64(sin(x) * -0.5), cos(x))) end
code[x_, eps_] := N[(eps * N[(eps * N[(N[Sin[x], $MachinePrecision] * -0.5), $MachinePrecision] + N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \sin x \cdot -0.5, \cos x\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
lower-cos.f6499.0
Simplified99.0%
Final simplification99.0%
(FPCore (x eps) :precision binary64 (* eps (cos x)))
double code(double x, double eps) {
return eps * cos(x);
}
! Herbie alternative: first-order approximation eps * cos(x).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * cos(x)
end function
/** Herbie alternative: first-order approximation eps * Math.cos(x). */
public static double code(double x, double eps) {
return eps * Math.cos(x);
}
def code(x, eps):
    """Herbie alternative: first-order Taylor approximation, eps * cos(x)."""
    return math.cos(x) * eps
# Herbie alternative: first-order approximation eps * cos(x), in Float64.
function code(x, eps) return Float64(eps * cos(x)) end
% Herbie alternative: first-order approximation eps * cos(x).
function tmp = code(x, eps) tmp = eps * cos(x); end
(* Herbie alternative: eps * cos(x), rounded at $MachinePrecision. *)
code[x_, eps_] := N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \cos x
\end{array}
Initial program: 63.8% accuracy
Taylor expanded in eps around 0
lower-*.f64 — N/A
lower-cos.f64 — 98.4
Simplified: 98.4%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (fma 0.041666666666666664 (* eps eps) -0.5)))
(fma
x
(fma
eps
(* eps t_0)
(*
x
(fma
-0.16666666666666666
(* (* eps eps) (* x t_0))
(* eps (fma (* eps eps) 0.08333333333333333 -0.5)))))
(fma -0.16666666666666666 (* eps (* eps eps)) eps))))
/* Herbie alternative: bivariate Taylor polynomial (expanded first in eps,
 * then in x); the shared subterm t_0 = eps^2/24 - 1/2 is hoisted. */
double code(double x, double eps) {
double t_0 = fma(0.041666666666666664, (eps * eps), -0.5);
return fma(x, fma(eps, (eps * t_0), (x * fma(-0.16666666666666666, ((eps * eps) * (x * t_0)), (eps * fma((eps * eps), 0.08333333333333333, -0.5))))), fma(-0.16666666666666666, (eps * (eps * eps)), eps));
}
function code(x, eps) t_0 = fma(0.041666666666666664, Float64(eps * eps), -0.5) return fma(x, fma(eps, Float64(eps * t_0), Float64(x * fma(-0.16666666666666666, Float64(Float64(eps * eps) * Float64(x * t_0)), Float64(eps * fma(Float64(eps * eps), 0.08333333333333333, -0.5))))), fma(-0.16666666666666666, Float64(eps * Float64(eps * eps)), eps)) end
code[x_, eps_] := Block[{t$95$0 = N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]}, N[(x * N[(eps * N[(eps * t$95$0), $MachinePrecision] + N[(x * N[(-0.16666666666666666 * N[(N[(eps * eps), $MachinePrecision] * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(eps * N[(N[(eps * eps), $MachinePrecision] * 0.08333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(eps * N[(eps * eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\\
\mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot t\_0, x \cdot \mathsf{fma}\left(-0.16666666666666666, \left(\varepsilon \cdot \varepsilon\right) \cdot \left(x \cdot t\_0\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.08333333333333333, -0.5\right)\right)\right), \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right), \varepsilon\right)\right)
\end{array}
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
associate-+l+N/A
*-commutativeN/A
distribute-rgt-inN/A
Simplified99.9%
Taylor expanded in x around 0
Simplified97.4%
(FPCore (x eps)
:precision binary64
(*
eps
(fma
x
(fma
x
(fma
eps
(* x 0.08333333333333333)
(fma (* eps eps) 0.08333333333333333 -0.5))
(* eps -0.5))
(fma -0.16666666666666666 (* eps eps) 1.0))))
/* Herbie alternative: polynomial in x and eps factored into nested fma calls
 * (constants 1/12 = 0.0833..., -1/6 = -0.1666...). */
double code(double x, double eps) {
return eps * fma(x, fma(x, fma(eps, (x * 0.08333333333333333), fma((eps * eps), 0.08333333333333333, -0.5)), (eps * -0.5)), fma(-0.16666666666666666, (eps * eps), 1.0));
}
function code(x, eps) return Float64(eps * fma(x, fma(x, fma(eps, Float64(x * 0.08333333333333333), fma(Float64(eps * eps), 0.08333333333333333, -0.5)), Float64(eps * -0.5)), fma(-0.16666666666666666, Float64(eps * eps), 1.0))) end
code[x_, eps_] := N[(eps * N[(x * N[(x * N[(eps * N[(x * 0.08333333333333333), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * 0.08333333333333333 + -0.5), $MachinePrecision]), $MachinePrecision] + N[(eps * -0.5), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, x \cdot 0.08333333333333333, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.08333333333333333, -0.5\right)\right), \varepsilon \cdot -0.5\right), \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \varepsilon, 1\right)\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Taylor expanded in x around 0
associate-+r+N/A
+-commutativeN/A
lower-fma.f64N/A
Simplified97.4%
(FPCore (x eps) :precision binary64 (fma x (* -0.5 (* eps (fma x (fma -0.16666666666666666 (* eps eps) 1.0) eps))) (fma -0.16666666666666666 (* eps (* eps eps)) eps)))
/* Herbie alternative: fma form of
 * eps - eps^3/6 - 0.5*eps*x*(x*(1 - eps^2/6) + eps). */
double code(double x, double eps) {
return fma(x, (-0.5 * (eps * fma(x, fma(-0.16666666666666666, (eps * eps), 1.0), eps))), fma(-0.16666666666666666, (eps * (eps * eps)), eps));
}
function code(x, eps) return fma(x, Float64(-0.5 * Float64(eps * fma(x, fma(-0.16666666666666666, Float64(eps * eps), 1.0), eps))), fma(-0.16666666666666666, Float64(eps * Float64(eps * eps)), eps)) end
code[x_, eps_] := N[(x * N[(-0.5 * N[(eps * N[(x * N[(-0.16666666666666666 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(eps * N[(eps * eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, -0.5 \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \varepsilon, 1\right), \varepsilon\right)\right), \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \left(\varepsilon \cdot \varepsilon\right), \varepsilon\right)\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Taylor expanded in x around 0
Simplified97.3%
(FPCore (x eps) :precision binary64 (* eps (fma x (fma x (fma (* eps eps) 0.08333333333333333 -0.5) (* eps -0.5)) (fma -0.16666666666666666 (* eps eps) 1.0))))
/* Herbie alternative:
 * eps * (x*(x*(eps^2/12 - 1/2) - eps/2) + 1 - eps^2/6), via fma. */
double code(double x, double eps) {
return eps * fma(x, fma(x, fma((eps * eps), 0.08333333333333333, -0.5), (eps * -0.5)), fma(-0.16666666666666666, (eps * eps), 1.0));
}
function code(x, eps) return Float64(eps * fma(x, fma(x, fma(Float64(eps * eps), 0.08333333333333333, -0.5), Float64(eps * -0.5)), fma(-0.16666666666666666, Float64(eps * eps), 1.0))) end
code[x_, eps_] := N[(eps * N[(x * N[(x * N[(N[(eps * eps), $MachinePrecision] * 0.08333333333333333 + -0.5), $MachinePrecision] + N[(eps * -0.5), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.08333333333333333, -0.5\right), \varepsilon \cdot -0.5\right), \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \varepsilon, 1\right)\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Taylor expanded in x around 0
associate-+r+N/A
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
Simplified97.3%
(FPCore (x eps) :precision binary64 (* eps (fma x (* -0.5 (+ eps x)) (fma -0.16666666666666666 (* eps eps) 1.0))))
/* Herbie alternative: eps * (1 - eps^2/6 - 0.5*x*(eps + x)), via fma. */
double code(double x, double eps) {
return eps * fma(x, (-0.5 * (eps + x)), fma(-0.16666666666666666, (eps * eps), 1.0));
}
function code(x, eps) return Float64(eps * fma(x, Float64(-0.5 * Float64(eps + x)), fma(-0.16666666666666666, Float64(eps * eps), 1.0))) end
code[x_, eps_] := N[(eps * N[(x * N[(-0.5 * N[(eps + x), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(x, -0.5 \cdot \left(\varepsilon + x\right), \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \varepsilon, 1\right)\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Taylor expanded in x around 0
associate-+r+N/A
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
Simplified97.3%
Taylor expanded in eps around 0
distribute-lft-outN/A
lower-*.f64N/A
lower-+.f6497.3
Simplified97.3%
(FPCore (x eps) :precision binary64 (fma (* eps -0.5) (* x (+ eps x)) eps))
double code(double x, double eps) {
return fma((eps * -0.5), (x * (eps + x)), eps);
}
function code(x, eps) return fma(Float64(eps * -0.5), Float64(x * Float64(eps + x)), eps) end
code[x_, eps_] := N[(N[(eps * -0.5), $MachinePrecision] * N[(x * N[(eps + x), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon \cdot -0.5, x \cdot \left(\varepsilon + x\right), \varepsilon\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
lower-cos.f6499.0
Simplified99.0%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6497.8
Simplified97.8%
Taylor expanded in x around 0
+-commutativeN/A
Simplified97.3%
Final simplification97.3%
(FPCore (x eps) :precision binary64 (* eps (fma (* x -0.5) (+ eps x) 1.0)))
/* Herbie alternative: eps * (1 - 0.5*x*(eps + x)), via a single fma. */
double code(double x, double eps) {
return eps * fma((x * -0.5), (eps + x), 1.0);
}
function code(x, eps) return Float64(eps * fma(Float64(x * -0.5), Float64(eps + x), 1.0)) end
code[x_, eps_] := N[(eps * N[(N[(x * -0.5), $MachinePrecision] * N[(eps + x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(x \cdot -0.5, \varepsilon + x, 1\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Taylor expanded in x around 0
associate-+r+N/A
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
Simplified97.3%
Taylor expanded in eps around 0
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
unpow2N/A
associate-*r*N/A
distribute-lft-outN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-+.f6497.3
Simplified97.3%
(FPCore (x eps) :precision binary64 (* eps (fma -0.16666666666666666 (* eps eps) 1.0)))
/* Herbie alternative: eps - eps^3/6 (truncated sine series; x dropped). */
double code(double x, double eps) {
return eps * fma(-0.16666666666666666, (eps * eps), 1.0);
}
function code(x, eps) return Float64(eps * fma(-0.16666666666666666, Float64(eps * eps), 1.0)) end
code[x_, eps_] := N[(eps * N[(-0.16666666666666666 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \varepsilon, 1\right)
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
associate-+l+N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-sin.f64N/A
associate-*r*N/A
associate-*r*N/A
distribute-lft1-inN/A
lower-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f6499.6
Simplified99.6%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6497.2
Simplified97.2%
(FPCore (x eps) :precision binary64 eps)
/* Herbie alternative: zeroth-order approximation — the difference collapses
 * to eps; x does not affect the result. */
double code(double x, double eps) {
    (void)x; /* intentionally unused */
    return eps;
}
! Herbie alternative: zeroth-order result eps; x is unused.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps
end function
/** Herbie alternative: zeroth-order result eps; x is unused. */
public static double code(double x, double eps) {
return eps;
}
def code(x, eps):
    """Herbie alternative: zeroth-order approximation — just eps (x unused)."""
    return eps
# Herbie alternative: zeroth-order result eps; x is unused.
function code(x, eps) return eps end
% Herbie alternative: zeroth-order result eps; x is unused.
function tmp = code(x, eps) tmp = eps; end
(* Herbie alternative: zeroth-order result eps; x is unused. *)
code[x_, eps_] := eps
\begin{array}{l}
\\
\varepsilon
\end{array}
Initial program 63.8%
Taylor expanded in eps around 0
lower-*.f64N/A
lower-cos.f6498.4
Simplified98.4%
Taylor expanded in x around 0
Simplified97.2%
Final simplification97.2%
(FPCore (x eps) :precision binary64 (* (* 2.0 (cos (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
/* Exact product-to-sum rewrite: 2*cos(x + eps/2)*sin(eps/2);
 * avoids the subtractive cancellation of the naive form. */
double code(double x, double eps) {
return (2.0 * cos((x + (eps / 2.0)))) * sin((eps / 2.0));
}
! Exact product-to-sum rewrite: 2*cos(x + eps/2)*sin(eps/2).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (2.0d0 * cos((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
/** Exact product-to-sum rewrite: 2*cos(x + eps/2)*sin(eps/2). */
public static double code(double x, double eps) {
return (2.0 * Math.cos((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps):
    """Cancellation-free product form: 2 * cos(x + eps/2) * sin(eps/2)."""
    half = eps / 2.0
    return (2.0 * math.cos(x + half)) * math.sin(half)
# Exact product-to-sum rewrite: 2*cos(x + eps/2)*sin(eps/2), in Float64.
function code(x, eps) return Float64(Float64(2.0 * cos(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0))) end
% Exact product-to-sum rewrite: 2*cos(x + eps/2)*sin(eps/2).
function tmp = code(x, eps) tmp = (2.0 * cos((x + (eps / 2.0)))) * sin((eps / 2.0)); end
code[x_, eps_] := N[(N[(2.0 * N[Cos[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(2 \cdot \cos \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}
(FPCore (x eps) :precision binary64 (+ (* (sin x) (- (cos eps) 1.0)) (* (cos x) (sin eps))))
/* Exact angle-addition rewrite: sin(x)*(cos(eps)-1) + cos(x)*sin(eps). */
double code(double x, double eps) {
return (sin(x) * (cos(eps) - 1.0)) + (cos(x) * sin(eps));
}
! Exact angle-addition rewrite: sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (sin(x) * (cos(eps) - 1.0d0)) + (cos(x) * sin(eps))
end function
/** Exact angle-addition rewrite: sin(x)*(cos(eps)-1) + cos(x)*sin(eps). */
public static double code(double x, double eps) {
return (Math.sin(x) * (Math.cos(eps) - 1.0)) + (Math.cos(x) * Math.sin(eps));
}
def code(x, eps):
    """Angle-addition rewrite: sin(x)*(cos(eps) - 1) + cos(x)*sin(eps)."""
    damped = math.sin(x) * (math.cos(eps) - 1.0)
    rotated = math.cos(x) * math.sin(eps)
    return damped + rotated
# Exact angle-addition rewrite, in Float64.
function code(x, eps) return Float64(Float64(sin(x) * Float64(cos(eps) - 1.0)) + Float64(cos(x) * sin(eps))) end
% Exact angle-addition rewrite: sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
function tmp = code(x, eps) tmp = (sin(x) * (cos(eps) - 1.0)) + (cos(x) * sin(eps)); end
code[x_, eps_] := N[(N[(N[Sin[x], $MachinePrecision] * N[(N[Cos[eps], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[Cos[x], $MachinePrecision] * N[Sin[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin x \cdot \left(\cos \varepsilon - 1\right) + \cos x \cdot \sin \varepsilon
\end{array}
(FPCore (x eps) :precision binary64 (* (* (cos (* 0.5 (- eps (* -2.0 x)))) (sin (* 0.5 eps))) 2.0))
/* Exact rewrite: 2*cos(0.5*(eps + 2x))*sin(0.5*eps),
 * written with eps - (-2x) as emitted by the report. */
double code(double x, double eps) {
return (cos((0.5 * (eps - (-2.0 * x)))) * sin((0.5 * eps))) * 2.0;
}
! Exact rewrite: 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (cos((0.5d0 * (eps - ((-2.0d0) * x)))) * sin((0.5d0 * eps))) * 2.0d0
end function
/** Exact rewrite: 2*cos(0.5*(eps + 2x))*sin(0.5*eps). */
public static double code(double x, double eps) {
return (Math.cos((0.5 * (eps - (-2.0 * x)))) * Math.sin((0.5 * eps))) * 2.0;
}
def code(x, eps):
    """Product form: 2 * cos(0.5*(eps + 2x)) * sin(0.5*eps).

    The angle is written as 0.5*(eps - (-2*x)) to match the report exactly.
    """
    angle = 0.5 * (eps - (-2.0 * x))
    return (math.cos(angle) * math.sin(0.5 * eps)) * 2.0
# Exact rewrite: 2*cos(0.5*(eps + 2x))*sin(0.5*eps), in Float64.
function code(x, eps) return Float64(Float64(cos(Float64(0.5 * Float64(eps - Float64(-2.0 * x)))) * sin(Float64(0.5 * eps))) * 2.0) end
% Exact rewrite: 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
function tmp = code(x, eps) tmp = (cos((0.5 * (eps - (-2.0 * x)))) * sin((0.5 * eps))) * 2.0; end
code[x_, eps_] := N[(N[(N[Cos[N[(0.5 * N[(eps - N[(-2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\cos \left(0.5 \cdot \left(\varepsilon - -2 \cdot x\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot 2
\end{array}
herbie shell --seed 2024215
;; Herbie input "2sin (example 3.3)": sin(x + eps) - sin(x) in binary64,
;; with a precondition bounding x to [-1e4, 1e4] and 1e-16*|x| < eps < |x|,
;; plus three reference :alt rewrites supplied to the tool.
(FPCore (x eps)
:name "2sin (example 3.3)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (* 2 (cos (+ x (/ eps 2))) (sin (/ eps 2))))
:alt
(! :herbie-platform default (+ (* (sin x) (- (cos eps) 1)) (* (cos x) (sin eps))))
:alt
(! :herbie-platform default (* (cos (* 1/2 (- eps (* -2 x)))) (sin (* 1/2 eps)) 2))
(- (sin (+ x eps)) (sin x)))