
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Original expression: e*sin(v) / (1 + e*cos(v)) in double precision.
! Added implicit none: all names are declared, so this only enables checking.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
// Original expression: e*sin(v) / (1 + e*cos(v)) in double precision.
public static double code(double e, double v) {
    final double num = e * Math.sin(v);
    final double den = 1.0 + e * Math.cos(v);
    return num / den;
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
/* Original expression: e*sin(v) / (1 + e*cos(v)) (binary64). */
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Original expression: e*sin(v) / (1 + e*cos(v)) in double precision.
! Added implicit none: all names are declared, so this only enables checking.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
// Original expression: e*sin(v) / (1 + e*cos(v)) in double precision.
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
(FPCore (e v) :precision binary64 (* (fma (cos v) e -1.0) (/ (* e (sin v)) (- (pow (* (cos v) e) 2.0) 1.0))))
/* Herbie alternative: fma(cos v, e, -1) * (e*sin v) / ((cos v * e)^2 - 1).
   The exact operation order (including the fma) is accuracy-significant;
   do not re-associate or simplify. */
double code(double e, double v) {
return fma(cos(v), e, -1.0) * ((e * sin(v)) / (pow((cos(v) * e), 2.0) - 1.0));
}
function code(e, v) return Float64(fma(cos(v), e, -1.0) * Float64(Float64(e * sin(v)) / Float64((Float64(cos(v) * e) ^ 2.0) - 1.0))) end
code[e_, v_] := N[(N[(N[Cos[v], $MachinePrecision] * e + -1.0), $MachinePrecision] * N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(N[Power[N[(N[Cos[v], $MachinePrecision] * e), $MachinePrecision], 2.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\cos v, e, -1\right) \cdot \frac{e \cdot \sin v}{{\left(\cos v \cdot e\right)}^{2} - 1}
\end{array}
Initial program 99.7%
lift-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
flip-+N/A
associate-/r/N/A
lower-*.f64N/A
lower-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
lower--.f64N/A
pow2N/A
lower-pow.f64N/A
lift-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Final simplification99.7%
(FPCore (e v) :precision binary64 (* (/ (sin v) (fma (cos v) e 1.0)) e))
/* Herbie alternative: (sin v / fma(cos v, e, 1)) * e.
   The fma in the denominator is accuracy-significant; keep the order as written. */
double code(double e, double v) {
return (sin(v) / fma(cos(v), e, 1.0)) * e;
}
function code(e, v) return Float64(Float64(sin(v) / fma(cos(v), e, 1.0)) * e) end
code[e_, v_] := N[(N[(N[Sin[v], $MachinePrecision] / N[(N[Cos[v], $MachinePrecision] * e + 1.0), $MachinePrecision]), $MachinePrecision] * e), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin v}{\mathsf{fma}\left(\cos v, e, 1\right)} \cdot e
\end{array}
Initial program 99.7%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6499.7
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.7
Applied rewrites99.7%
(FPCore (e v) :precision binary64 (* (/ e (+ 1.0 e)) (sin v)))
double code(double e, double v) {
return (e / (1.0 + e)) * sin(v);
}
! Taylor-in-v alternative: (e / (1 + e)) * sin(v).
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e / (1.0d0 + e)) * sin(v)
end function
// Taylor-in-v alternative: (e / (1 + e)) * sin(v).
public static double code(double e, double v) {
return (e / (1.0 + e)) * Math.sin(v);
}
def code(e, v): return (e / (1.0 + e)) * math.sin(v)
function code(e, v) return Float64(Float64(e / Float64(1.0 + e)) * sin(v)) end
function tmp = code(e, v) tmp = (e / (1.0 + e)) * sin(v); end
code[e_, v_] := N[(N[(e / N[(1.0 + e), $MachinePrecision]), $MachinePrecision] * N[Sin[v], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{1 + e} \cdot \sin v
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
+-commutativeN/A
lower-+.f6498.7
Applied rewrites98.7%
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6498.7
Applied rewrites98.7%
Final simplification98.7%
(FPCore (e v) :precision binary64 (* (- 1.0 e) (* e (sin v))))
/* Taylor-in-e alternative: (1 - e) * (e * sin(v)). */
double code(double e, double v) {
return (1.0 - e) * (e * sin(v));
}
! Taylor-in-e alternative: (1 - e) * (e * sin(v)).
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (1.0d0 - e) * (e * sin(v))
end function
// Taylor-in-e alternative: (1 - e) * (e * sin(v)).
public static double code(double e, double v) {
return (1.0 - e) * (e * Math.sin(v));
}
def code(e, v): return (1.0 - e) * (e * math.sin(v))
function code(e, v) return Float64(Float64(1.0 - e) * Float64(e * sin(v))) end
function tmp = code(e, v) tmp = (1.0 - e) * (e * sin(v)); end
code[e_, v_] := N[(N[(1.0 - e), $MachinePrecision] * N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - e\right) \cdot \left(e \cdot \sin v\right)
\end{array}
Initial program 99.7%
Taylor expanded in e around 0
distribute-lft-inN/A
*-commutativeN/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
associate-*l*N/A
*-commutativeN/A
distribute-rgt1-inN/A
lower-*.f64N/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-sin.f6498.3
Applied rewrites98.3%
Taylor expanded in v around 0
Applied rewrites97.6%
Final simplification97.6%
(FPCore (e v) :precision binary64 (if (<= v 2.95e-8) (* (/ e (+ 1.0 e)) v) (* e (sin v))))
/* Regime split from the accuracy search: for v <= 2.95e-8 use the
   linearized form (e/(1+e))*v, otherwise e*sin(v). */
double code(double e, double v) {
double tmp;
if (v <= 2.95e-8) {
tmp = (e / (1.0 + e)) * v;
} else {
tmp = e * sin(v);
}
return tmp;
}
! Regime split: linearized form (e/(1+e))*v for v <= 2.95d-8, else e*sin(v).
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
real(8) :: tmp
if (v <= 2.95d-8) then
tmp = (e / (1.0d0 + e)) * v
else
tmp = e * sin(v)
end if
code = tmp
end function
// Regime split: linearized form (e/(1+e))*v for v <= 2.95e-8, else e*sin(v).
public static double code(double e, double v) {
double tmp;
if (v <= 2.95e-8) {
tmp = (e / (1.0 + e)) * v;
} else {
tmp = e * Math.sin(v);
}
return tmp;
}
def code(e, v): tmp = 0 if v <= 2.95e-8: tmp = (e / (1.0 + e)) * v else: tmp = e * math.sin(v) return tmp
function code(e, v) tmp = 0.0 if (v <= 2.95e-8) tmp = Float64(Float64(e / Float64(1.0 + e)) * v); else tmp = Float64(e * sin(v)); end return tmp end
function tmp_2 = code(e, v) tmp = 0.0; if (v <= 2.95e-8) tmp = (e / (1.0 + e)) * v; else tmp = e * sin(v); end tmp_2 = tmp; end
code[e_, v_] := If[LessEqual[v, 2.95e-8], N[(N[(e / N[(1.0 + e), $MachinePrecision]), $MachinePrecision] * v), $MachinePrecision], N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;v \leq 2.95 \cdot 10^{-8}:\\
\;\;\;\;\frac{e}{1 + e} \cdot v\\
\mathbf{else}:\\
\;\;\;\;e \cdot \sin v\\
\end{array}
\end{array}
if v < 2.9499999999999999e-8: Initial program 99.8%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6462.5
Applied rewrites62.5%
if 2.9499999999999999e-8 < v Initial program 99.4%
Taylor expanded in e around 0
*-commutativeN/A
lower-*.f64N/A
lower-sin.f6496.4
Applied rewrites96.4%
Final simplification69.4%
(FPCore (e v) :precision binary64 (* (/ e (+ 1.0 e)) v))
/* Fully linearized alternative: (e / (1 + e)) * v. */
double code(double e, double v) {
return (e / (1.0 + e)) * v;
}
! Fully linearized alternative: (e / (1 + e)) * v.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e / (1.0d0 + e)) * v
end function
// Fully linearized alternative: (e / (1 + e)) * v.
public static double code(double e, double v) {
return (e / (1.0 + e)) * v;
}
def code(e, v): return (e / (1.0 + e)) * v
function code(e, v) return Float64(Float64(e / Float64(1.0 + e)) * v) end
function tmp = code(e, v) tmp = (e / (1.0 + e)) * v; end
code[e_, v_] := N[(N[(e / N[(1.0 + e), $MachinePrecision]), $MachinePrecision] * v), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{1 + e} \cdot v
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Final simplification50.7%
(FPCore (e v) :precision binary64 (fma v e (* (* (- e) v) e)))
/* Polynomial alternative: fma(v, e, (-e*v)*e) = e*v - e^2*v.
   The fma is accuracy-significant; keep the order as written. */
double code(double e, double v) {
return fma(v, e, ((-e * v) * e));
}
function code(e, v) return fma(v, e, Float64(Float64(Float64(-e) * v) * e)) end
code[e_, v_] := N[(v * e + N[(N[((-e) * v), $MachinePrecision] * e), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(v, e, \left(\left(-e\right) \cdot v\right) \cdot e\right)
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Taylor expanded in e around 0
Applied rewrites48.7%
Taylor expanded in e around 0
Applied rewrites49.6%
Applied rewrites49.6%
(FPCore (e v) :precision binary64 (* (- e (* e e)) v))
/* Polynomial alternative: (e - e*e) * v. */
double code(double e, double v) {
return (e - (e * e)) * v;
}
! Polynomial alternative: (e - e*e) * v.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e - (e * e)) * v
end function
// Polynomial alternative: (e - e*e) * v.
public static double code(double e, double v) {
return (e - (e * e)) * v;
}
def code(e, v): return (e - (e * e)) * v
function code(e, v) return Float64(Float64(e - Float64(e * e)) * v) end
function tmp = code(e, v) tmp = (e - (e * e)) * v; end
code[e_, v_] := N[(N[(e - N[(e * e), $MachinePrecision]), $MachinePrecision] * v), $MachinePrecision]
\begin{array}{l}
\\
\left(e - e \cdot e\right) \cdot v
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Applied rewrites45.4%
Taylor expanded in e around 0
Applied rewrites49.6%
(FPCore (e v) :precision binary64 (* (* (- 1.0 e) e) v))
/* Polynomial alternative: ((1 - e) * e) * v. */
double code(double e, double v) {
return ((1.0 - e) * e) * v;
}
! Polynomial alternative: ((1 - e) * e) * v.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = ((1.0d0 - e) * e) * v
end function
// Polynomial alternative: ((1 - e) * e) * v.
public static double code(double e, double v) {
return ((1.0 - e) * e) * v;
}
def code(e, v): return ((1.0 - e) * e) * v
function code(e, v) return Float64(Float64(Float64(1.0 - e) * e) * v) end
function tmp = code(e, v) tmp = ((1.0 - e) * e) * v; end
code[e_, v_] := N[(N[(N[(1.0 - e), $MachinePrecision] * e), $MachinePrecision] * v), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(1 - e\right) \cdot e\right) \cdot v
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Taylor expanded in e around 0
Applied rewrites49.6%
(FPCore (e v) :precision binary64 (* (- v (* e v)) e))
/* Polynomial alternative: (v - e*v) * e. */
double code(double e, double v) {
return (v - (e * v)) * e;
}
! Polynomial alternative: (v - e*v) * e.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (v - (e * v)) * e
end function
// Polynomial alternative: (v - e*v) * e.
public static double code(double e, double v) {
return (v - (e * v)) * e;
}
def code(e, v): return (v - (e * v)) * e
function code(e, v) return Float64(Float64(v - Float64(e * v)) * e) end
function tmp = code(e, v) tmp = (v - (e * v)) * e; end
code[e_, v_] := N[(N[(v - N[(e * v), $MachinePrecision]), $MachinePrecision] * e), $MachinePrecision]
\begin{array}{l}
\\
\left(v - e \cdot v\right) \cdot e
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Taylor expanded in e around 0
Applied rewrites48.7%
Taylor expanded in e around 0
Applied rewrites49.6%
Final simplification49.6%
(FPCore (e v) :precision binary64 (* (* (- 1.0 e) v) e))
/* Polynomial alternative: ((1 - e) * v) * e. */
double code(double e, double v) {
return ((1.0 - e) * v) * e;
}
! Polynomial alternative: ((1 - e) * v) * e.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = ((1.0d0 - e) * v) * e
end function
// Polynomial alternative: ((1 - e) * v) * e.
public static double code(double e, double v) {
return ((1.0 - e) * v) * e;
}
def code(e, v): return ((1.0 - e) * v) * e
function code(e, v) return Float64(Float64(Float64(1.0 - e) * v) * e) end
function tmp = code(e, v) tmp = ((1.0 - e) * v) * e; end
code[e_, v_] := N[(N[(N[(1.0 - e), $MachinePrecision] * v), $MachinePrecision] * e), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(1 - e\right) \cdot v\right) \cdot e
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Taylor expanded in e around 0
Applied rewrites49.6%
(FPCore (e v) :precision binary64 (* e v))
/* Fully truncated alternative: e * v. */
double code(double e, double v) {
return e * v;
}
! Fully truncated alternative: e * v.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * v
end function
// Fully truncated alternative: e * v.
public static double code(double e, double v) {
return e * v;
}
def code(e, v): return e * v
function code(e, v) return Float64(e * v) end
function tmp = code(e, v) tmp = e * v; end
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.7%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f6450.7
Applied rewrites50.7%
Taylor expanded in e around 0
Applied rewrites48.7%
Final simplification48.7%
herbie shell --seed 2024254
; Input specification "Trigonometry A": binary64, precondition 0 <= e <= 1,
; body e*sin(v) / (1 + e*cos(v)).
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))