
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Original expression: e*sin(v) / (1 + e*cos(v)), in double precision.
real(8) function code(e, v)
    implicit none
    real(8), intent(in) :: e
    real(8), intent(in) :: v
    code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function code
/** Original expression: e*sin(v) / (1 + e*cos(v)) in double precision. */
public static double code(double e, double v) {
    final double numerator = e * Math.sin(v);
    final double denominator = 1.0 + e * Math.cos(v);
    return numerator / denominator;
}
def code(e, v):
    """Original expression: e*sin(v) / (1 + e*cos(v)) in double precision."""
    numerator = e * math.sin(v)
    denominator = 1.0 + e * math.cos(v)
    return numerator / denominator
# Original expression: e*sin(v) / (1 + e*cos(v)); intermediates rounded to Float64.
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
% Original expression: e*sin(v) / (1 + e*cos(v)) in double precision.
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
(* Original expression: e*Sin[v] / (1 + e*Cos[v]); each step rounded to machine precision. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
/* Alternative 1 (identical to the input program): e*sin(v) / (1 + e*cos(v)). */
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Alternative 1 (identical to the input program): e*sin(v) / (1 + e*cos(v)).
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
// Alternative 1 (identical to the input program): e*sin(v) / (1 + e*cos(v)).
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
# Alternative 1 (identical to the input program): e*sin(v) / (1 + e*cos(v)).
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
# Alternative 1 (identical to the input program): e*sin(v) / (1 + e*cos(v)).
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
% Alternative 1 (identical to the input program): e*sin(v) / (1 + e*cos(v)).
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
(* Alternative 1 (identical to the input program): e*Sin[v] / (1 + e*Cos[v]). *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
(FPCore (e v) :precision binary64 (/ (* (sin v) e) (fma (cos v) e 1.0)))
double code(double e, double v) {
return (sin(v) * e) / fma(cos(v), e, 1.0);
}
# Variant with the denominator fused: sin(v)*e / fma(cos(v), e, 1).
function code(e, v) return Float64(Float64(sin(v) * e) / fma(cos(v), e, 1.0)) end
(* Variant with the denominator written as a fused multiply-add: Cos[v]*e + 1. *)
code[e_, v_] := N[(N[(N[Sin[v], $MachinePrecision] * e), $MachinePrecision] / N[(N[Cos[v], $MachinePrecision] * e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin v \cdot e}{\mathsf{fma}\left(\cos v, e, 1\right)}
\end{array}
Initial program 99.8%
lift-*.f64N/A
*-commutativeN/A
lower-*.f6499.8
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.8
Applied rewrites99.8%
(FPCore (e v) :precision binary64 (* (/ (sin v) (fma (cos v) e 1.0)) e))
double code(double e, double v) {
return (sin(v) / fma(cos(v), e, 1.0)) * e;
}
# Factored form: (sin(v) / fma(cos(v), e, 1)) * e.
function code(e, v) return Float64(Float64(sin(v) / fma(cos(v), e, 1.0)) * e) end
(* Factored form: (Sin[v] / (Cos[v]*e + 1)) * e. *)
code[e_, v_] := N[(N[(N[Sin[v], $MachinePrecision] / N[(N[Cos[v], $MachinePrecision] * e + 1.0), $MachinePrecision]), $MachinePrecision] * e), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin v}{\mathsf{fma}\left(\cos v, e, 1\right)} \cdot e
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6499.8
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.8
Applied rewrites99.8%
(FPCore (e v)
:precision binary64
(*
(- e)
(/
-1.0
(+
(fma (fma -0.3333333333333333 e 0.16666666666666666) v (/ e v))
(pow v -1.0)))))
/* Taylor-series-derived alternative (expanded in v around 0); the exact
   operation order mirrors the FPCore above and is accuracy-sensitive —
   do not algebraically simplify. */
double code(double e, double v) {
return -e * (-1.0 / (fma(fma(-0.3333333333333333, e, 0.16666666666666666), v, (e / v)) + pow(v, -1.0)));
}
# Taylor-series-derived alternative (expanded in v around 0); order-sensitive.
function code(e, v) return Float64(Float64(-e) * Float64(-1.0 / Float64(fma(fma(-0.3333333333333333, e, 0.16666666666666666), v, Float64(e / v)) + (v ^ -1.0)))) end
(* Taylor-series-derived alternative (expanded in v around 0); order-sensitive. *)
code[e_, v_] := N[((-e) * N[(-1.0 / N[(N[(N[(-0.3333333333333333 * e + 0.16666666666666666), $MachinePrecision] * v + N[(e / v), $MachinePrecision]), $MachinePrecision] + N[Power[v, -1.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-e\right) \cdot \frac{-1}{\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, e, 0.16666666666666666\right), v, \frac{e}{v}\right) + {v}^{-1}}
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.5
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.5
Applied rewrites99.5%
Taylor expanded in v around 0
lower-/.f64N/A
associate-+r+N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
lower-fma.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-+.f6450.1
Applied rewrites50.1%
lift-/.f64N/A
frac-2negN/A
neg-mul-1N/A
*-commutativeN/A
neg-mul-1N/A
times-fracN/A
div-invN/A
metadata-evalN/A
*-commutativeN/A
neg-mul-1N/A
lift-neg.f64N/A
lower-*.f64N/A
lower-/.f6450.2
Applied rewrites50.2%
Taylor expanded in e around 0
Applied rewrites50.2%
Final simplification50.2%
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 e)))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + e);
}
! Simplified alternative (cos term dropped): e*sin(v) / (1 + e).
real(8) function code(e, v)
    implicit none
    real(8), intent(in) :: e
    real(8), intent(in) :: v
    code = (e * sin(v)) / (1.0d0 + e)
end function code
/** Simplified alternative (cos term dropped): e*sin(v) / (1 + e). */
public static double code(double e, double v) {
    final double numerator = e * Math.sin(v);
    final double denominator = 1.0 + e;
    return numerator / denominator;
}
def code(e, v):
    """Simplified alternative (cos term dropped): e*sin(v) / (1 + e)."""
    numerator = e * math.sin(v)
    denominator = 1.0 + e
    return numerator / denominator
# Simplified alternative (cos term dropped): e*sin(v) / (1 + e).
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + e)) end
% Simplified alternative (cos term dropped): e*sin(v) / (1 + e).
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + e); end
(* Simplified alternative (cos term dropped): e*Sin[v] / (1 + e). *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + e), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-+.f6498.9
Applied rewrites98.9%
(FPCore (e v) :precision binary64 (* (sin v) e))
double code(double e, double v) {
return sin(v) * e;
}
! Fully simplified alternative (Taylor in e around 0): sin(v) * e.
real(8) function code(e, v)
    implicit none
    real(8), intent(in) :: e
    real(8), intent(in) :: v
    code = sin(v) * e
end function code
/** Fully simplified alternative (Taylor in e around 0): sin(v) * e. */
public static double code(double e, double v) {
    final double s = Math.sin(v);
    return s * e;
}
def code(e, v):
    """Fully simplified alternative (Taylor in e around 0): sin(v) * e."""
    s = math.sin(v)
    return s * e
# Fully simplified alternative (Taylor in e around 0): sin(v) * e.
function code(e, v) return Float64(sin(v) * e) end
% Fully simplified alternative (Taylor in e around 0): sin(v) * e.
function tmp = code(e, v) tmp = sin(v) * e; end
(* Fully simplified alternative (Taylor in e around 0): Sin[v] * e. *)
code[e_, v_] := N[(N[Sin[v], $MachinePrecision] * e), $MachinePrecision]
\begin{array}{l}
\\
\sin v \cdot e
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
*-commutativeN/A
lower-*.f64N/A
lower-sin.f6497.8
Applied rewrites97.8%
(FPCore (e v)
:precision binary64
(*
e
(/
(- -1.0)
(/
(fma (* (fma e -0.3333333333333333 0.16666666666666666) v) v (+ 1.0 e))
v))))
/* Taylor-series-derived alternative; the double negation -(-1.0) mirrors the
   FPCore (- -1.0) above. Operation order is accuracy-sensitive — do not
   algebraically simplify. */
double code(double e, double v) {
return e * (-(-1.0) / (fma((fma(e, -0.3333333333333333, 0.16666666666666666) * v), v, (1.0 + e)) / v));
}
# Taylor-series-derived alternative; order-sensitive, mirrors the FPCore above.
function code(e, v) return Float64(e * Float64(Float64(-(-1.0)) / Float64(fma(Float64(fma(e, -0.3333333333333333, 0.16666666666666666) * v), v, Float64(1.0 + e)) / v))) end
(* Taylor-series-derived alternative; order-sensitive, mirrors the FPCore above. *)
code[e_, v_] := N[(e * N[((--1.0) / N[(N[(N[(N[(e * -0.3333333333333333 + 0.16666666666666666), $MachinePrecision] * v), $MachinePrecision] * v + N[(1.0 + e), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \frac{--1}{\frac{\mathsf{fma}\left(\mathsf{fma}\left(e, -0.3333333333333333, 0.16666666666666666\right) \cdot v, v, 1 + e\right)}{v}}
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.5
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.5
Applied rewrites99.5%
Taylor expanded in v around 0
lower-/.f64N/A
associate-+r+N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
lower-fma.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-+.f6450.1
Applied rewrites50.1%
lift-/.f64N/A
frac-2negN/A
neg-mul-1N/A
*-commutativeN/A
neg-mul-1N/A
times-fracN/A
div-invN/A
metadata-evalN/A
*-commutativeN/A
neg-mul-1N/A
lift-neg.f64N/A
lower-*.f64N/A
lower-/.f6450.2
Applied rewrites50.2%
Final simplification50.2%
(FPCore (e v) :precision binary64 (* (- e) (/ -1.0 (/ (fma (* 0.16666666666666666 v) v (+ 1.0 e)) v))))
/* Taylor-series-derived alternative (expanded in v, then e, around 0);
   operation order is accuracy-sensitive — do not algebraically simplify. */
double code(double e, double v) {
return -e * (-1.0 / (fma((0.16666666666666666 * v), v, (1.0 + e)) / v));
}
# Taylor-series-derived alternative; order-sensitive, mirrors the FPCore above.
function code(e, v) return Float64(Float64(-e) * Float64(-1.0 / Float64(fma(Float64(0.16666666666666666 * v), v, Float64(1.0 + e)) / v))) end
(* Taylor-series-derived alternative; order-sensitive, mirrors the FPCore above. *)
code[e_, v_] := N[((-e) * N[(-1.0 / N[(N[(N[(0.16666666666666666 * v), $MachinePrecision] * v + N[(1.0 + e), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-e\right) \cdot \frac{-1}{\frac{\mathsf{fma}\left(0.16666666666666666 \cdot v, v, 1 + e\right)}{v}}
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.5
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.5
Applied rewrites99.5%
Taylor expanded in v around 0
lower-/.f64N/A
associate-+r+N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
lower-fma.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-+.f6450.1
Applied rewrites50.1%
lift-/.f64N/A
frac-2negN/A
neg-mul-1N/A
*-commutativeN/A
neg-mul-1N/A
times-fracN/A
div-invN/A
metadata-evalN/A
*-commutativeN/A
neg-mul-1N/A
lift-neg.f64N/A
lower-*.f64N/A
lower-/.f6450.2
Applied rewrites50.2%
Taylor expanded in e around 0
Applied rewrites50.2%
(FPCore (e v) :precision binary64 (/ (* e v) (+ 1.0 e)))
/* Small-v approximation (sin v ~ v): e*v / (1 + e). */
double code(double e, double v) {
    double num = e * v;
    double den = 1.0 + e;
    return num / den;
}
! Small-v approximation (sin v ~ v): e*v / (1 + e).
real(8) function code(e, v)
    implicit none
    real(8), intent(in) :: e
    real(8), intent(in) :: v
    code = (e * v) / (1.0d0 + e)
end function code
/** Small-v approximation (sin v ~ v): e*v / (1 + e). */
public static double code(double e, double v) {
    final double numerator = e * v;
    final double denominator = 1.0 + e;
    return numerator / denominator;
}
def code(e, v):
    """Small-v approximation (sin v ~ v): e*v / (1 + e)."""
    numerator = e * v
    denominator = 1.0 + e
    return numerator / denominator
# Small-v approximation (sin v ~ v): e*v / (1 + e).
function code(e, v) return Float64(Float64(e * v) / Float64(1.0 + e)) end
% Small-v approximation (sin v ~ v): e*v / (1 + e).
function tmp = code(e, v) tmp = (e * v) / (1.0 + e); end
(* Small-v approximation (Sin[v] ~ v): e*v / (1 + e). *)
code[e_, v_] := N[(N[(e * v), $MachinePrecision] / N[(1.0 + e), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{1 + e}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6449.2
Applied rewrites49.2%
Applied rewrites49.2%
(FPCore (e v) :precision binary64 (* (/ e (+ 1.0 e)) v))
/* Factored small-v approximation: (e / (1 + e)) * v. */
double code(double e, double v) {
    double ratio = e / (1.0 + e);
    return ratio * v;
}
! Factored small-v approximation: (e / (1 + e)) * v.
real(8) function code(e, v)
    implicit none
    real(8), intent(in) :: e
    real(8), intent(in) :: v
    code = (e / (1.0d0 + e)) * v
end function code
/** Factored small-v approximation: (e / (1 + e)) * v. */
public static double code(double e, double v) {
    final double ratio = e / (1.0 + e);
    return ratio * v;
}
def code(e, v):
    """Factored small-v approximation: (e / (1 + e)) * v."""
    ratio = e / (1.0 + e)
    return ratio * v
# Factored small-v approximation: (e / (1 + e)) * v.
function code(e, v) return Float64(Float64(e / Float64(1.0 + e)) * v) end
% Factored small-v approximation: (e / (1 + e)) * v.
function tmp = code(e, v) tmp = (e / (1.0 + e)) * v; end
(* Factored small-v approximation: (e / (1 + e)) * v. *)
code[e_, v_] := N[(N[(e / N[(1.0 + e), $MachinePrecision]), $MachinePrecision] * v), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{1 + e} \cdot v
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6449.2
Applied rewrites49.2%
(FPCore (e v) :precision binary64 (fma (* (- e) v) e (* e v)))
double code(double e, double v) {
return fma((-e * v), e, (e * v));
}
# Fused small-e/small-v form: fma((-e)*v, e, e*v).
function code(e, v) return fma(Float64(Float64(-e) * v), e, Float64(e * v)) end
(* Fused small-e/small-v form: (-e)*v*e + e*v. *)
code[e_, v_] := N[(N[((-e) * v), $MachinePrecision] * e + N[(e * v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(-e\right) \cdot v, e, e \cdot v\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6449.2
Applied rewrites49.2%
Taylor expanded in e around 0
Applied rewrites48.7%
Applied rewrites48.7%
(FPCore (e v) :precision binary64 (* (fma (- v) e v) e))
double code(double e, double v) {
return fma(-v, e, v) * e;
}
# Factored fused form: fma(-v, e, v) * e.
function code(e, v) return Float64(fma(Float64(-v), e, v) * e) end
(* Factored fused form: ((-v)*e + v) * e. *)
code[e_, v_] := N[(N[((-v) * e + v), $MachinePrecision] * e), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-v, e, v\right) \cdot e
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6449.2
Applied rewrites49.2%
Taylor expanded in e around 0
Applied rewrites48.7%
(FPCore (e v) :precision binary64 (* e v))
/* Lowest-order approximation (Taylor in both variables): e * v. */
double code(double e, double v) {
    double product = e * v;
    return product;
}
! Lowest-order approximation (Taylor in both variables): e * v.
real(8) function code(e, v)
    implicit none
    real(8), intent(in) :: e
    real(8), intent(in) :: v
    code = e * v
end function code
/** Lowest-order approximation (Taylor in both variables): e * v. */
public static double code(double e, double v) {
    final double product = e * v;
    return product;
}
def code(e, v):
    """Lowest-order approximation (Taylor in both variables): e * v."""
    product = e * v
    return product
# Lowest-order approximation (Taylor in both variables): e * v.
function code(e, v) return Float64(e * v) end
% Lowest-order approximation (Taylor in both variables): e * v.
function tmp = code(e, v) tmp = e * v; end
(* Lowest-order approximation (Taylor in both variables): e * v. *)
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
associate-*l/N/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f6449.2
Applied rewrites49.2%
Taylor expanded in e around 0
Applied rewrites48.1%
herbie shell --seed 2024307
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))