
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Initial program: e*sin(v) / (1 + e*cos(v)) in real(8) precision.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
real(8) :: num, den
num = e * sin(v)
den = 1.0d0 + e * cos(v)
code = num / den
end function
// Initial program: e*sin(v) / (1 + e*cos(v)) in double precision.
public static double code(double e, double v) {
    final double num = e * Math.sin(v);
    final double den = 1.0 + e * Math.cos(v);
    return num / den;
}
def code(e, v):
    """Initial program: e*sin(v) / (1 + e*cos(v)) in binary64."""
    numerator = e * math.sin(v)
    denominator = 1.0 + e * math.cos(v)
    return numerator / denominator
# Initial program: e*sin(v) / (1 + e*cos(v)); every intermediate is
# rounded to Float64 exactly as in the generated one-liner.
function code(e, v)
    num = Float64(e * sin(v))
    den = Float64(1.0 + Float64(e * cos(v)))
    return Float64(num / den)
end
% Initial program: e*sin(v) / (1 + e*cos(v)) in double precision.
function tmp = code(e, v)
  num = e * sin(v);
  den = 1.0 + e * cos(v);
  tmp = num / den;
end
(* Initial program: e*sin(v) / (1 + e*cos(v)); each N[..., $MachinePrecision]
   wrapper forces rounding after every sub-operation to mimic binary64. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
/* Alternative listing: identical to the initial program,
 * e*sin(v) / (1 + e*cos(v)) in binary64. */
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
/* Alternative listing: identical to the initial program,
 * e*sin(v) / (1 + e*cos(v)) in binary64. */
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Initial program 99.9%
Final simplification 99.9%
(FPCore (e v) :precision binary64 (/ (sin v) (+ (cos v) (/ 1.0 e))))
double code(double e, double v) {
return sin(v) / (cos(v) + (1.0 / e));
}
! Rewritten form: sin(v) / (cos(v) + 1/e), in real(8) precision.
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
real(8) :: inv_e
inv_e = 1.0d0 / e
code = sin(v) / (cos(v) + inv_e)
end function
public static double code(double e, double v) {
return Math.sin(v) / (Math.cos(v) + (1.0 / e));
}
def code(e, v):
    """Rewritten form: sin(v) / (cos(v) + 1/e)."""
    reciprocal = 1.0 / e
    return math.sin(v) / (math.cos(v) + reciprocal)
function code(e, v) return Float64(sin(v) / Float64(cos(v) + Float64(1.0 / e))) end
function tmp = code(e, v) tmp = sin(v) / (cos(v) + (1.0 / e)); end
code[e_, v_] := N[(N[Sin[v], $MachinePrecision] / N[(N[Cos[v], $MachinePrecision] + N[(1.0 / e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin v}{\cos v + \frac{1}{e}}
\end{array}
Initial program 99.9%
*-commutative 99.9%
cos-neg 99.9%
associate-/l* 99.6%
+-commutative 99.6%
cos-neg 99.6%
metadata-eval 99.6%
sub-neg 99.6%
div-sub 99.6%
*-commutative 99.6%
associate-/l* 99.6%
*-inverses 99.6%
/-rgt-identity 99.6%
metadata-eval 99.6%
associate-/r* 99.6%
neg-mul-1 99.6%
unsub-neg 99.6%
neg-mul-1 99.6%
associate-/r* 99.6%
metadata-eval 99.6%
distribute-neg-frac 99.6%
metadata-eval 99.6%
Simplified 99.6%
Final simplification 99.6%
(FPCore (e v) :precision binary64 (* e (sin v)))
double code(double e, double v) {
return e * sin(v);
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * sin(v)
end function
public static double code(double e, double v) {
return e * Math.sin(v);
}
def code(e, v):
    """Simplified alternative: e * sin(v) (denominator dropped)."""
    s = math.sin(v)
    return e * s
function code(e, v) return Float64(e * sin(v)) end
function tmp = code(e, v) tmp = e * sin(v); end
code[e_, v_] := N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \sin v
\end{array}
Initial program 99.9%
*-commutative 99.9%
cos-neg 99.9%
associate-/l* 99.6%
+-commutative 99.6%
cos-neg 99.6%
metadata-eval 99.6%
sub-neg 99.6%
div-sub 99.6%
*-commutative 99.6%
associate-/l* 99.6%
*-inverses 99.6%
/-rgt-identity 99.6%
metadata-eval 99.6%
associate-/r* 99.6%
neg-mul-1 99.6%
unsub-neg 99.6%
neg-mul-1 99.6%
associate-/r* 99.6%
metadata-eval 99.6%
distribute-neg-frac 99.6%
metadata-eval 99.6%
Simplified 99.6%
Taylor expanded in e around 0 97.9%
Final simplification 97.9%
(FPCore (e v)
:precision binary64
(*
e
(/
1.0
(+
(* v (- (* e -0.5) (* -0.16666666666666666 (+ e 1.0))))
(+ (/ 1.0 v) (/ e v))))))
/* Series alternative in v: e / (v*coeff + (1+e)/v), with the generated
 * coefficients kept bit-for-bit identical. */
double code(double e, double v) {
    double coeff = (e * -0.5) - (-0.16666666666666666 * (e + 1.0));
    double tail  = (1.0 / v) + (e / v);
    double den   = (v * coeff) + tail;
    return e * (1.0 / den);
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * (1.0d0 / ((v * ((e * (-0.5d0)) - ((-0.16666666666666666d0) * (e + 1.0d0)))) + ((1.0d0 / v) + (e / v))))
end function
public static double code(double e, double v) {
return e * (1.0 / ((v * ((e * -0.5) - (-0.16666666666666666 * (e + 1.0)))) + ((1.0 / v) + (e / v))));
}
def code(e, v):
    """Series alternative in v: e / (v*coeff + (1+e)/v)."""
    coeff = (e * -0.5) - (-0.16666666666666666 * (e + 1.0))
    tail = (1.0 / v) + (e / v)
    return e * (1.0 / ((v * coeff) + tail))
function code(e, v) return Float64(e * Float64(1.0 / Float64(Float64(v * Float64(Float64(e * -0.5) - Float64(-0.16666666666666666 * Float64(e + 1.0)))) + Float64(Float64(1.0 / v) + Float64(e / v))))) end
function tmp = code(e, v) tmp = e * (1.0 / ((v * ((e * -0.5) - (-0.16666666666666666 * (e + 1.0)))) + ((1.0 / v) + (e / v)))); end
code[e_, v_] := N[(e * N[(1.0 / N[(N[(v * N[(N[(e * -0.5), $MachinePrecision] - N[(-0.16666666666666666 * N[(e + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / v), $MachinePrecision] + N[(e / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \frac{1}{v \cdot \left(e \cdot -0.5 - -0.16666666666666666 \cdot \left(e + 1\right)\right) + \left(\frac{1}{v} + \frac{e}{v}\right)}
\end{array}
Initial program 99.9%
associate-/l* 99.6%
div-inv 99.7%
+-commutative 99.7%
fma-def 99.7%
Applied egg-rr 99.7%
Taylor expanded in v around 0 52.6%
Final simplification 52.6%
(FPCore (e v) :precision binary64 (* e (/ 1.0 (+ (+ (/ 1.0 v) (/ e v)) (* v (* e -0.3333333333333333))))))
/* Series alternative: e / ((1+e)/v - e*v/3), coefficient kept exact. */
double code(double e, double v) {
    double recip_terms = (1.0 / v) + (e / v);
    double cubic_term  = v * (e * -0.3333333333333333);
    return e * (1.0 / (recip_terms + cubic_term));
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * (1.0d0 / (((1.0d0 / v) + (e / v)) + (v * (e * (-0.3333333333333333d0)))))
end function
public static double code(double e, double v) {
return e * (1.0 / (((1.0 / v) + (e / v)) + (v * (e * -0.3333333333333333))));
}
def code(e, v):
    """Series alternative: e / ((1+e)/v - e*v/3)."""
    recip_terms = (1.0 / v) + (e / v)
    cubic_term = v * (e * -0.3333333333333333)
    return e * (1.0 / (recip_terms + cubic_term))
function code(e, v) return Float64(e * Float64(1.0 / Float64(Float64(Float64(1.0 / v) + Float64(e / v)) + Float64(v * Float64(e * -0.3333333333333333))))) end
function tmp = code(e, v) tmp = e * (1.0 / (((1.0 / v) + (e / v)) + (v * (e * -0.3333333333333333)))); end
code[e_, v_] := N[(e * N[(1.0 / N[(N[(N[(1.0 / v), $MachinePrecision] + N[(e / v), $MachinePrecision]), $MachinePrecision] + N[(v * N[(e * -0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \frac{1}{\left(\frac{1}{v} + \frac{e}{v}\right) + v \cdot \left(e \cdot -0.3333333333333333\right)}
\end{array}
Initial program 99.9%
associate-/l* 99.6%
div-inv 99.7%
+-commutative 99.7%
fma-def 99.7%
Applied egg-rr 99.7%
Taylor expanded in v around 0 52.6%
Taylor expanded in e around inf 52.1%
*-commutative 52.1%
Simplified 52.1%
Final simplification 52.1%
(FPCore (e v) :precision binary64 (/ e (/ (+ e 1.0) v)))
/* Small-v alternative: e / ((e + 1) / v). */
double code(double e, double v) {
    double slope = (e + 1.0) / v;
    return e / slope;
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e / ((e + 1.0d0) / v)
end function
public static double code(double e, double v) {
return e / ((e + 1.0) / v);
}
def code(e, v):
    """Small-v alternative: e / ((e + 1) / v)."""
    slope = (e + 1.0) / v
    return e / slope
function code(e, v) return Float64(e / Float64(Float64(e + 1.0) / v)) end
function tmp = code(e, v) tmp = e / ((e + 1.0) / v); end
code[e_, v_] := N[(e / N[(N[(e + 1.0), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{\frac{e + 1}{v}}
\end{array}
Initial program 99.9%
Taylor expanded in v around 0 51.6%
associate-/l* 51.5%
Simplified 51.5%
Final simplification 51.5%
(FPCore (e v) :precision binary64 (/ (* e v) (+ e 1.0)))
/* Small-v alternative: (e * v) / (e + 1). */
double code(double e, double v) {
    double num = e * v;
    return num / (e + 1.0);
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * v) / (e + 1.0d0)
end function
public static double code(double e, double v) {
return (e * v) / (e + 1.0);
}
def code(e, v):
    """Small-v alternative: (e * v) / (e + 1)."""
    numerator = e * v
    return numerator / (e + 1.0)
function code(e, v) return Float64(Float64(e * v) / Float64(e + 1.0)) end
function tmp = code(e, v) tmp = (e * v) / (e + 1.0); end
code[e_, v_] := N[(N[(e * v), $MachinePrecision] / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{e + 1}
\end{array}
Initial program 99.9%
Taylor expanded in v around 0 51.6%
Final simplification 51.6%
(FPCore (e v) :precision binary64 (* e v))
/* Degenerate linear alternative: e * v. */
double code(double e, double v) {
    /* IEEE 754 multiplication is commutative, so v * e is bit-identical. */
    return v * e;
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * v
end function
public static double code(double e, double v) {
return e * v;
}
def code(e, v):
    """Degenerate linear alternative: e * v."""
    # IEEE 754 multiplication commutes exactly, so v * e is bit-identical.
    return v * e
function code(e, v) return Float64(e * v) end
function tmp = code(e, v) tmp = e * v; end
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.9%
Taylor expanded in v around 0 51.6%
associate-/l* 51.5%
Simplified 51.5%
Taylor expanded in e around 0 50.2%
*-commutative 50.2%
Simplified 50.2%
Final simplification 50.2%
(FPCore (e v) :precision binary64 v)
/* Degenerate alternative: returns v; e is unused in this rewrite. */
double code(double e, double v) {
    (void) e; /* silence unused-parameter warnings */
    return v;
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = v
end function
public static double code(double e, double v) {
return v;
}
def code(e, v): return v  # degenerate alternative: e is unused
function code(e, v) return v end
function tmp = code(e, v) tmp = v; end
code[e_, v_] := v
\begin{array}{l}
\\
v
\end{array}
Initial program 99.9%
Taylor expanded in v around 0 51.6%
associate-/l* 51.5%
Simplified 51.5%
Taylor expanded in e around inf 4.7%
Final simplification 4.7%
herbie shell --seed 2023338
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))