
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Evaluate e*sin(v) / (1 + e*cos(v)) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
    // e*sin(v) / (1 + e*cos(v)) in double precision.
    final double num = e * Math.sin(v);
    final double den = 1.0 + e * Math.cos(v);
    return num / den;
}
def code(e, v):
    """Return e*sin(v) / (1 + e*cos(v)) evaluated in double precision."""
    numerator = e * math.sin(v)
    denominator = 1.0 + e * math.cos(v)
    return numerator / denominator
function code(e, v)
    # e*sin(v) / (1 + e*cos(v)), with each intermediate rounded to Float64
    num = Float64(e * sin(v))
    den = Float64(1.0 + Float64(e * cos(v)))
    return Float64(num / den)
end
function tmp = code(e, v)
    % e*sin(v) / (1 + e*cos(v)) in double precision.
    num = e * sin(v);
    den = 1.0 + (e * cos(v));
    tmp = num / den;
end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Evaluate e*sin(v) / (1 + e*cos(v)) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (fma (cos v) e 1.0)))
double code(double e, double v) {
return (e * sin(v)) / fma(cos(v), e, 1.0);
}
function code(e, v) return Float64(Float64(e * sin(v)) / fma(cos(v), e, 1.0)) end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(N[Cos[v], $MachinePrecision] * e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{\mathsf{fma}\left(\cos v, e, 1\right)}
\end{array}
Initial program 99.8%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6499.8
Applied rewrites99.8%
(FPCore (e v) :precision binary64 (* (sin v) (fma (cos v) (- (* e e)) e)))
double code(double e, double v) {
return sin(v) * fma(cos(v), -(e * e), e);
}
function code(e, v) return Float64(sin(v) * fma(cos(v), Float64(-Float64(e * e)), e)) end
code[e_, v_] := N[(N[Sin[v], $MachinePrecision] * N[(N[Cos[v], $MachinePrecision] * (-N[(e * e), $MachinePrecision]) + e), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin v \cdot \mathsf{fma}\left(\cos v, -e \cdot e, e\right)
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
associate-*r*N/A
mul-1-negN/A
distribute-rgt-neg-outN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-sin.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
Applied rewrites99.1%
Final simplification99.1%
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ e 1.0)))
double code(double e, double v) {
return (e * sin(v)) / (e + 1.0);
}
! Taylor-derived alternative: e*sin(v) / (e + 1) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (e + 1.0d0)
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (e + 1.0);
}
def code(e, v):
    """Taylor-derived alternative: e*sin(v) / (e + 1)."""
    scaled = e * math.sin(v)
    return scaled / (e + 1.0)
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(e + 1.0)) end
function tmp = code(e, v) tmp = (e * sin(v)) / (e + 1.0); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{e + 1}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-+.f6498.8
Applied rewrites98.8%
Final simplification98.8%
(FPCore (e v) :precision binary64 (* e (sin v)))
double code(double e, double v) {
return e * sin(v);
}
! Leading-order alternative: e*sin(v) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * sin(v)
end function
public static double code(double e, double v) {
return e * Math.sin(v);
}
def code(e, v): return e * math.sin(v)
function code(e, v) return Float64(e * sin(v)) end
function tmp = code(e, v) tmp = e * sin(v); end
code[e_, v_] := N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \sin v
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
lower-*.f64N/A
lower-sin.f6498.0
Applied rewrites98.0%
(FPCore (e v)
:precision binary64
(/
e
(/
(fma
(* v v)
(fma e -0.5 (fma e 0.16666666666666666 0.16666666666666666))
(+ e 1.0))
v)))
double code(double e, double v) {
return e / (fma((v * v), fma(e, -0.5, fma(e, 0.16666666666666666, 0.16666666666666666)), (e + 1.0)) / v);
}
function code(e, v) return Float64(e / Float64(fma(Float64(v * v), fma(e, -0.5, fma(e, 0.16666666666666666, 0.16666666666666666)), Float64(e + 1.0)) / v)) end
code[e_, v_] := N[(e / N[(N[(N[(v * v), $MachinePrecision] * N[(e * -0.5 + N[(e * 0.16666666666666666 + 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + N[(e + 1.0), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{\frac{\mathsf{fma}\left(v \cdot v, \mathsf{fma}\left(e, -0.5, \mathsf{fma}\left(e, 0.16666666666666666, 0.16666666666666666\right)\right), e + 1\right)}{v}}
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.6
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in v around 0
lower-/.f64N/A
associate-+r+N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
lower-fma.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f6455.3
Applied rewrites55.3%
(FPCore (e v) :precision binary64 (/ (* e v) (fma e (fma v (* v -0.5) 1.0) 1.0)))
double code(double e, double v) {
return (e * v) / fma(e, fma(v, (v * -0.5), 1.0), 1.0);
}
function code(e, v) return Float64(Float64(e * v) / fma(e, fma(v, Float64(v * -0.5), 1.0), 1.0)) end
code[e_, v_] := N[(N[(e * v), $MachinePrecision] / N[(e * N[(v * N[(v * -0.5), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{\mathsf{fma}\left(e, \mathsf{fma}\left(v, v \cdot -0.5, 1\right), 1\right)}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
+-commutativeN/A
*-lft-identityN/A
*-commutativeN/A
associate-*r*N/A
distribute-rgt-outN/A
lower-fma.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6464.1
Applied rewrites64.1%
Taylor expanded in v around 0
lower-*.f6455.0
Applied rewrites55.0%
(FPCore (e v) :precision binary64 (* (/ (* e v) (- 1.0 (* e e))) (- 1.0 e)))
double code(double e, double v) {
return ((e * v) / (1.0 - (e * e))) * (1.0 - e);
}
! Taylor-derived alternative: (e*v / (1 - e*e)) * (1 - e) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = ((e * v) / (1.0d0 - (e * e))) * (1.0d0 - e)
end function
public static double code(double e, double v) {
return ((e * v) / (1.0 - (e * e))) * (1.0 - e);
}
def code(e, v): return ((e * v) / (1.0 - (e * e))) * (1.0 - e)
function code(e, v) return Float64(Float64(Float64(e * v) / Float64(1.0 - Float64(e * e))) * Float64(1.0 - e)) end
function tmp = code(e, v) tmp = ((e * v) / (1.0 - (e * e))) * (1.0 - e); end
code[e_, v_] := N[(N[(N[(e * v), $MachinePrecision] / N[(1.0 - N[(e * e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 - e), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{1 - e \cdot e} \cdot \left(1 - e\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6454.1
Applied rewrites54.1%
Applied rewrites54.1%
(FPCore (e v) :precision binary64 (/ (* e v) (+ e 1.0)))
double code(double e, double v) {
return (e * v) / (e + 1.0);
}
! Taylor-derived alternative: e*v / (e + 1) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * v) / (e + 1.0d0)
end function
public static double code(double e, double v) {
return (e * v) / (e + 1.0);
}
def code(e, v): return (e * v) / (e + 1.0)
function code(e, v) return Float64(Float64(e * v) / Float64(e + 1.0)) end
function tmp = code(e, v) tmp = (e * v) / (e + 1.0); end
code[e_, v_] := N[(N[(e * v), $MachinePrecision] / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{e + 1}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6454.1
Applied rewrites54.1%
Final simplification54.1%
(FPCore (e v) :precision binary64 (* e (fma e (- (* e v) v) v)))
double code(double e, double v) {
return e * fma(e, ((e * v) - v), v);
}
function code(e, v) return Float64(e * fma(e, Float64(Float64(e * v) - v), v)) end
code[e_, v_] := N[(e * N[(e * N[(N[(e * v), $MachinePrecision] - v), $MachinePrecision] + v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \mathsf{fma}\left(e, e \cdot v - v, v\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6454.1
Applied rewrites54.1%
Taylor expanded in e around 0
Applied rewrites53.9%
(FPCore (e v) :precision binary64 (* e (- v (* e v))))
double code(double e, double v) {
return e * (v - (e * v));
}
! Taylor-derived alternative: e * (v - e*v) in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * (v - (e * v))
end function
public static double code(double e, double v) {
return e * (v - (e * v));
}
def code(e, v): return e * (v - (e * v))
function code(e, v) return Float64(e * Float64(v - Float64(e * v))) end
function tmp = code(e, v) tmp = e * (v - (e * v)); end
code[e_, v_] := N[(e * N[(v - N[(e * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \left(v - e \cdot v\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6454.1
Applied rewrites54.1%
Taylor expanded in e around 0
Applied rewrites53.7%
(FPCore (e v) :precision binary64 (* e v))
double code(double e, double v) {
    /* Leading-order approximation of the original quotient: just e*v. */
    return v * e;
}
! Leading-order alternative: e*v in binary64.
!   e : scalar factor (report precondition: 0 <= e <= 1)
!   v : angle argument in radians
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * v
end function
public static double code(double e, double v) {
return e * v;
}
def code(e, v):
    """Leading-order approximation of the original quotient: e*v."""
    return v * e
function code(e, v) return Float64(e * v) end
function tmp = code(e, v) tmp = e * v; end
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6454.1
Applied rewrites54.1%
Taylor expanded in e around 0
Applied rewrites53.2%
herbie shell --seed 2024233
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))