
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
real(8) function code(e, v)
! Evaluates e*sin(v) / (1 + e*cos(v)) in double precision (binary64).
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
    // e*sin(v) / (1 + e*cos(v)), evaluated directly in double precision.
    final double numerator = e * Math.sin(v);
    final double denominator = 1.0 + e * Math.cos(v);
    return numerator / denominator;
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v): return (e * math.sin(v)) / (1.0 + (e * math.cos(v)))
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Initial program 99.8%
Final simplification 99.8%
(FPCore (e v) :precision binary64 (/ e (/ (+ 1.0 (* e (cos v))) (sin v))))
double code(double e, double v) {
return e / ((1.0 + (e * cos(v))) / sin(v));
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e / ((1.0d0 + (e * cos(v))) / sin(v))
end function
public static double code(double e, double v) {
return e / ((1.0 + (e * Math.cos(v))) / Math.sin(v));
}
def code(e, v): return e / ((1.0 + (e * math.cos(v))) / math.sin(v))
function code(e, v) return Float64(e / Float64(Float64(1.0 + Float64(e * cos(v))) / sin(v))) end
function tmp = code(e, v) tmp = e / ((1.0 + (e * cos(v))) / sin(v)); end
code[e_, v_] := N[(e / N[(N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sin[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{\frac{1 + e \cdot \cos v}{\sin v}}
\end{array}
Initial program 99.8%
associate-/l* 99.7%
Simplified 99.7%
Final simplification 99.7%
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ e 1.0)))
double code(double e, double v) {
return (e * sin(v)) / (e + 1.0);
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (e + 1.0d0)
end function
public static double code(double e, double v) {
return (e * Math.sin(v)) / (e + 1.0);
}
def code(e, v): return (e * math.sin(v)) / (e + 1.0)
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(e + 1.0)) end
function tmp = code(e, v) tmp = (e * sin(v)) / (e + 1.0); end
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{e + 1}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0 99.3%
+-commutative 99.3%
Simplified 99.3%
Final simplification 99.3%
(FPCore (e v) :precision binary64 (* e (sin v)))
double code(double e, double v) {
return e * sin(v);
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * sin(v)
end function
public static double code(double e, double v) {
return e * Math.sin(v);
}
def code(e, v): return e * math.sin(v)
function code(e, v) return Float64(e * sin(v)) end
function tmp = code(e, v) tmp = e * sin(v); end
code[e_, v_] := N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \sin v
\end{array}
Initial program 99.8%
associate-/l* 99.7%
Simplified 99.7%
Taylor expanded in e around 0 97.9%
Final simplification 97.9%
(FPCore (e v) :precision binary64 (/ e (+ (* v (+ (* e -0.5) (* -0.16666666666666666 (- -1.0 e)))) (+ (/ e v) (/ 1.0 v)))))
double code(double e, double v) {
    /* Taylor expansion of the original expression in v around 0,
       written as e divided by a series in v plus 1/v terms. */
    double series = v * ((e * -0.5) + (-0.16666666666666666 * (-1.0 - e)));
    double reciprocal = (e / v) + (1.0 / v);
    return e / (series + reciprocal);
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e / ((v * ((e * (-0.5d0)) + ((-0.16666666666666666d0) * ((-1.0d0) - e)))) + ((e / v) + (1.0d0 / v)))
end function
public static double code(double e, double v) {
return e / ((v * ((e * -0.5) + (-0.16666666666666666 * (-1.0 - e)))) + ((e / v) + (1.0 / v)));
}
def code(e, v): return e / ((v * ((e * -0.5) + (-0.16666666666666666 * (-1.0 - e)))) + ((e / v) + (1.0 / v)))
function code(e, v) return Float64(e / Float64(Float64(v * Float64(Float64(e * -0.5) + Float64(-0.16666666666666666 * Float64(-1.0 - e)))) + Float64(Float64(e / v) + Float64(1.0 / v)))) end
function tmp = code(e, v) tmp = e / ((v * ((e * -0.5) + (-0.16666666666666666 * (-1.0 - e)))) + ((e / v) + (1.0 / v))); end
code[e_, v_] := N[(e / N[(N[(v * N[(N[(e * -0.5), $MachinePrecision] + N[(-0.16666666666666666 * N[(-1.0 - e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(e / v), $MachinePrecision] + N[(1.0 / v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{v \cdot \left(e \cdot -0.5 + -0.16666666666666666 \cdot \left(-1 - e\right)\right) + \left(\frac{e}{v} + \frac{1}{v}\right)}
\end{array}
Initial program 99.8%
associate-/l* 99.7%
Simplified 99.7%
Taylor expanded in v around 0 55.4%
Final simplification 55.4%
(FPCore (e v) :precision binary64 (* v (/ e (- e -1.0))))
double code(double e, double v) {
    /* Small-v approximation: v * e / (e + 1), written as e / (e - -1). */
    double ratio = e / (e - -1.0);
    return v * ratio;
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = v * (e / (e - (-1.0d0)))
end function
public static double code(double e, double v) {
return v * (e / (e - -1.0));
}
def code(e, v): return v * (e / (e - -1.0))
function code(e, v) return Float64(v * Float64(e / Float64(e - -1.0))) end
function tmp = code(e, v) tmp = v * (e / (e - -1.0)); end
code[e_, v_] := N[(v * N[(e / N[(e - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
v \cdot \frac{e}{e - -1}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0 99.3%
+-commutative 99.3%
Simplified 99.3%
Taylor expanded in v around 0 54.3%
associate-/l* 54.1%
*-rgt-identity 54.1%
associate-*r/ 54.2%
+-commutative 54.2%
associate-/r/ 54.3%
*-commutative 54.3%
associate-*r/ 54.3%
*-rgt-identity 54.3%
*-lft-identity 54.3%
fma-def 54.3%
metadata-eval 54.3%
fma-neg 54.3%
*-lft-identity 54.3%
Simplified 54.3%
Final simplification 54.3%
(FPCore (e v) :precision binary64 (* e v))
double code(double e, double v) {
    /* Coarse approximation: just e * v. */
    double product = e * v;
    return product;
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * v
end function
public static double code(double e, double v) {
return e * v;
}
def code(e, v): return e * v
function code(e, v) return Float64(e * v) end
function tmp = code(e, v) tmp = e * v; end
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.8%
associate-/l* 99.7%
Simplified 99.7%
Taylor expanded in v around 0 54.2%
+-commutative 54.2%
Simplified 54.2%
Taylor expanded in e around 0 52.9%
Final simplification 52.9%
(FPCore (e v) :precision binary64 v)
double code(double e, double v) {
    /* Degenerate approximation: returns v; e is intentionally unused. */
    (void)e;
    return v;
}
real(8) function code(e, v)
real(8), intent (in) :: e
real(8), intent (in) :: v
code = v
end function
public static double code(double e, double v) {
return v;
}
def code(e, v): return v
function code(e, v) return v end
function tmp = code(e, v) tmp = v; end
code[e_, v_] := v
\begin{array}{l}
\\
v
\end{array}
Initial program 99.8%
associate-/l* 99.7%
Simplified 99.7%
Taylor expanded in v around 0 54.2%
+-commutative 54.2%
Simplified 54.2%
Taylor expanded in e around inf 4.6%
Final simplification 4.6%
herbie shell --seed 2023178
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))