
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Herbie input expression: e*sin(v) / (1 + e*cos(v)) in real(8).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
// Herbie input: e*sin(v) / (1 + e*cos(v)) in double precision.
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v):
    # Herbie input: e*sin(v) / (1 + e*cos(v)).
    num = e * math.sin(v)
    den = 1.0 + (e * math.cos(v))
    return num / den
# Herbie input: e*sin(v) / (1 + e*cos(v)); each binary64 rounding made explicit via Float64.
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
% Herbie input: e*sin(v) / (1 + e*cos(v)).
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
(* Herbie input: e Sin[v] / (1 + e Cos[v]), each step rounded to $MachinePrecision. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Herbie input expression: e*sin(v) / (1 + e*cos(v)) in real(8).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function
// Herbie input: e*sin(v) / (1 + e*cos(v)) in double precision.
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v):
    # Herbie input: e*sin(v) / (1 + e*cos(v)).
    num = e * math.sin(v)
    den = 1.0 + (e * math.cos(v))
    return num / den
# Herbie input: e*sin(v) / (1 + e*cos(v)); each binary64 rounding made explicit via Float64.
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
% Herbie input: e*sin(v) / (1 + e*cos(v)).
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
(* Herbie input: e Sin[v] / (1 + e Cos[v]), each step rounded to $MachinePrecision. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
(FPCore (e v) :precision binary64 (* e (* (sin v) (/ (- -1.0) (fma e (cos v) 1.0)))))
double code(double e, double v) {
return e * (sin(v) * (-(-1.0) / fma(e, cos(v), 1.0)));
}
# Herbie alternative: e * (sin(v) * (-(-1.0) / fma(e, cos(v), 1.0))); fma keeps the denominator accurate.
function code(e, v) return Float64(e * Float64(sin(v) * Float64(Float64(-(-1.0)) / fma(e, cos(v), 1.0)))) end
(* Herbie alternative: e (Sin[v] ((--1.0) / (e Cos[v] + 1))) at machine precision. *)
code[e_, v_] := N[(e * N[(N[Sin[v], $MachinePrecision] * N[((--1.0) / N[(e * N[Cos[v], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \left(\sin v \cdot \frac{--1}{\mathsf{fma}\left(e, \cos v, 1\right)}\right)
\end{array}
Initial program 99.8%
lift-/.f64N/A
frac-2negN/A
div-invN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*l*N/A
lower-*.f64N/A
lower-neg.f64N/A
lower-*.f64N/A
metadata-evalN/A
frac-2negN/A
lower-/.f6499.8
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lower-fma.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (e v) :precision binary64 (* e (/ (sin v) (fma e (cos v) 1.0))))
double code(double e, double v) {
return e * (sin(v) / fma(e, cos(v), 1.0));
}
# Herbie alternative: e * (sin(v) / fma(e, cos(v), 1.0)).
function code(e, v) return Float64(e * Float64(sin(v) / fma(e, cos(v), 1.0))) end
(* Herbie alternative: e (Sin[v] / (e Cos[v] + 1)) at machine precision. *)
code[e_, v_] := N[(e * N[(N[Sin[v], $MachinePrecision] / N[(e * N[Cos[v], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \frac{\sin v}{\mathsf{fma}\left(e, \cos v, 1\right)}
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6499.8
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lower-fma.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ e 1.0)))
double code(double e, double v) {
return (e * sin(v)) / (e + 1.0);
}
! Herbie alternative (Taylor in v around 0): e*sin(v) / (e + 1).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (e + 1.0d0)
end function
// Herbie alternative (Taylor in v around 0): e*sin(v) / (e + 1).
public static double code(double e, double v) {
return (e * Math.sin(v)) / (e + 1.0);
}
def code(e, v):
    # Herbie alternative (Taylor in v around 0): e*sin(v) / (e + 1).
    num = e * math.sin(v)
    return num / (e + 1.0)
# Herbie alternative (Taylor in v around 0): e*sin(v) / (e + 1).
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(e + 1.0)) end
% Herbie alternative (Taylor in v around 0): e*sin(v) / (e + 1).
function tmp = code(e, v) tmp = (e * sin(v)) / (e + 1.0); end
(* Herbie alternative: e Sin[v] / (e + 1) at machine precision. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{e + 1}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-+.f6499.0
Applied rewrites99.0%
Final simplification99.0%
(FPCore (e v) :precision binary64 (* (sin v) (- e (* e e))))
double code(double e, double v) {
return sin(v) * (e - (e * e));
}
! Herbie alternative (Taylor in e around 0): sin(v) * (e - e*e).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = sin(v) * (e - (e * e))
end function
// Herbie alternative (Taylor in e around 0): sin(v) * (e - e*e).
public static double code(double e, double v) {
return Math.sin(v) * (e - (e * e));
}
def code(e, v):
    # Herbie alternative (Taylor in e around 0): sin(v) * (e - e^2).
    shrink = e - (e * e)
    return math.sin(v) * shrink
# Herbie alternative (Taylor in e around 0): sin(v) * (e - e^2).
function code(e, v) return Float64(sin(v) * Float64(e - Float64(e * e))) end
% Herbie alternative (Taylor in e around 0): sin(v) * (e - e^2).
function tmp = code(e, v) tmp = sin(v) * (e - (e * e)); end
(* Herbie alternative: Sin[v] (e - e^2) at machine precision. *)
code[e_, v_] := N[(N[Sin[v], $MachinePrecision] * N[(e - N[(e * e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin v \cdot \left(e - e \cdot e\right)
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
associate-*r*N/A
mul-1-negN/A
distribute-rgt-neg-outN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-sin.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
Applied rewrites98.4%
Taylor expanded in v around 0
Applied rewrites97.9%
(FPCore (e v)
:precision binary64
(let* ((t_0 (/ e (+ e 1.0))))
(if (<= v 2e-67)
(* v (fma (* v v) (* t_0 (fma 0.5 t_0 -0.16666666666666666)) t_0))
(* e (sin v)))))
double code(double e, double v) {
double t_0 = e / (e + 1.0);
double tmp;
if (v <= 2e-67) {
tmp = v * fma((v * v), (t_0 * fma(0.5, t_0, -0.16666666666666666)), t_0);
} else {
tmp = e * sin(v);
}
return tmp;
}
# Herbie branchy alternative: series in v (with t_0 = e/(e+1)) when v <= 2e-67, else e*sin(v).
function code(e, v) t_0 = Float64(e / Float64(e + 1.0)) tmp = 0.0 if (v <= 2e-67) tmp = Float64(v * fma(Float64(v * v), Float64(t_0 * fma(0.5, t_0, -0.16666666666666666)), t_0)); else tmp = Float64(e * sin(v)); end return tmp end
(* Herbie branchy alternative: series in v (with t0 = e/(e+1)) when v <= 2e-67, else e Sin[v]. *)
code[e_, v_] := Block[{t$95$0 = N[(e / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[v, 2e-67], N[(v * N[(N[(v * v), $MachinePrecision] * N[(t$95$0 * N[(0.5 * t$95$0 + -0.16666666666666666), $MachinePrecision]), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision], N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{e}{e + 1}\\
\mathbf{if}\;v \leq 2 \cdot 10^{-67}:\\
\;\;\;\;v \cdot \mathsf{fma}\left(v \cdot v, t_0 \cdot \mathsf{fma}\left(0.5, t_0, -0.16666666666666666\right), t_0\right)\\
\mathbf{else}:\\
\;\;\;\;e \cdot \sin v\\
\end{array}
\end{array}
if v < 1.99999999999999989e-67: Initial program 99.8%
Taylor expanded in v around 0
lower-*.f64N/A
lower-fma.f64N/A
Applied rewrites63.8%
if 1.99999999999999989e-67 < v: Initial program 99.8%
Taylor expanded in e around 0
lower-*.f64N/A
lower-sin.f6499.3
Applied rewrites99.3%
Final simplification74.3%
(FPCore (e v)
:precision binary64
(/
e
(/
(fma
(* v v)
(fma e -0.5 (fma e 0.16666666666666666 0.16666666666666666))
(+ e 1.0))
v)))
double code(double e, double v) {
return e / (fma((v * v), fma(e, -0.5, fma(e, 0.16666666666666666, 0.16666666666666666)), (e + 1.0)) / v);
}
# Herbie alternative: e / (fma(v*v, fma(e, -0.5, fma(e, 1/6, 1/6)), e + 1) / v).
function code(e, v) return Float64(e / Float64(fma(Float64(v * v), fma(e, -0.5, fma(e, 0.16666666666666666, 0.16666666666666666)), Float64(e + 1.0)) / v)) end
(* Herbie alternative: e / ((v^2 (e(-0.5) + (e/6 + 1/6)) + (e + 1)) / v) at machine precision. *)
code[e_, v_] := N[(e / N[(N[(N[(v * v), $MachinePrecision] * N[(e * -0.5 + N[(e * 0.16666666666666666 + 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + N[(e + 1.0), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{\frac{\mathsf{fma}\left(v \cdot v, \mathsf{fma}\left(e, -0.5, \mathsf{fma}\left(e, 0.16666666666666666, 0.16666666666666666\right)\right), e + 1\right)}{v}}
\end{array}
Initial program 99.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.6
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in v around 0
lower-/.f64N/A
associate-+r+N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
lower-fma.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f6451.9
Applied rewrites51.9%
(FPCore (e v) :precision binary64 (/ (* e v) (+ e 1.0)))
/* Herbie alternative (linearized in v): (e*v) / (e + 1). */
double code(double e, double v) {
    double num = e * v;
    return num / (e + 1.0);
}
! Herbie alternative (linearized in v): (e*v) / (e + 1).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * v) / (e + 1.0d0)
end function
// Herbie alternative (linearized in v): (e*v) / (e + 1).
public static double code(double e, double v) {
return (e * v) / (e + 1.0);
}
def code(e, v):
    # Herbie alternative (linearized in v): (e*v) / (e + 1).
    num = e * v
    return num / (e + 1.0)
# Herbie alternative (linearized in v): (e*v) / (e + 1).
function code(e, v) return Float64(Float64(e * v) / Float64(e + 1.0)) end
% Herbie alternative (linearized in v): (e*v) / (e + 1).
function tmp = code(e, v) tmp = (e * v) / (e + 1.0); end
(* Herbie alternative: (e v) / (e + 1) at machine precision. *)
code[e_, v_] := N[(N[(e * v), $MachinePrecision] / N[(e + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{e + 1}
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6450.9
Applied rewrites50.9%
Final simplification50.9%
(FPCore (e v) :precision binary64 (* e (fma e (- (* e v) v) v)))
double code(double e, double v) {
return e * fma(e, ((e * v) - v), v);
}
# Herbie alternative: e * fma(e, e*v - v, v).
function code(e, v) return Float64(e * fma(e, Float64(Float64(e * v) - v), v)) end
(* Herbie alternative: e (e (e v - v) + v) at machine precision. *)
code[e_, v_] := N[(e * N[(e * N[(N[(e * v), $MachinePrecision] - v), $MachinePrecision] + v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \mathsf{fma}\left(e, e \cdot v - v, v\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6450.9
Applied rewrites50.9%
Taylor expanded in e around 0
Applied rewrites50.2%
(FPCore (e v) :precision binary64 (fma v e (* e (- (* e v)))))
double code(double e, double v) {
return fma(v, e, (e * -(e * v)));
}
# Herbie alternative: fma(v, e, e * -(e*v)).
function code(e, v) return fma(v, e, Float64(e * Float64(-Float64(e * v)))) end
(* Herbie alternative: v e + e (-(e v)) at machine precision. *)
code[e_, v_] := N[(v * e + N[(e * (-N[(e * v), $MachinePrecision])), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(v, e, e \cdot \left(-e \cdot v\right)\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6450.9
Applied rewrites50.9%
Taylor expanded in e around 0
Applied rewrites49.2%
Taylor expanded in e around 0
Applied rewrites49.8%
Applied rewrites49.8%
Final simplification49.8%
(FPCore (e v) :precision binary64 (* v (- e (* e e))))
/* Herbie alternative: v * (e - e*e). */
double code(double e, double v) {
    double shrink = e - (e * e);
    return v * shrink;
}
! Herbie alternative: v * (e - e*e).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = v * (e - (e * e))
end function
// Herbie alternative: v * (e - e*e).
public static double code(double e, double v) {
return v * (e - (e * e));
}
def code(e, v):
    # Herbie alternative: v * (e - e^2).
    shrink = e - (e * e)
    return v * shrink
# Herbie alternative: v * (e - e^2).
function code(e, v) return Float64(v * Float64(e - Float64(e * e))) end
% Herbie alternative: v * (e - e^2).
function tmp = code(e, v) tmp = v * (e - (e * e)); end
(* Herbie alternative: v (e - e^2) at machine precision. *)
code[e_, v_] := N[(v * N[(e - N[(e * e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
v \cdot \left(e - e \cdot e\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6450.9
Applied rewrites50.9%
Taylor expanded in e around 0
Applied rewrites49.8%
(FPCore (e v) :precision binary64 (* e (* v (- 1.0 e))))
/* Herbie alternative: e * (v * (1 - e)). */
double code(double e, double v) {
    double scaled = v * (1.0 - e);
    return e * scaled;
}
! Herbie alternative: e * (v * (1 - e)).
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * (v * (1.0d0 - e))
end function
// Herbie alternative: e * (v * (1 - e)).
public static double code(double e, double v) {
return e * (v * (1.0 - e));
}
def code(e, v):
    # Herbie alternative: e * (v * (1 - e)), association preserved.
    scaled = v * (1.0 - e)
    return e * scaled
# Herbie alternative: e * (v * (1 - e)).
function code(e, v) return Float64(e * Float64(v * Float64(1.0 - e))) end
% Herbie alternative: e * (v * (1 - e)).
function tmp = code(e, v) tmp = e * (v * (1.0 - e)); end
(* Herbie alternative: e (v (1 - e)) at machine precision. *)
code[e_, v_] := N[(e * N[(v * N[(1.0 - e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \left(v \cdot \left(1 - e\right)\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6450.9
Applied rewrites50.9%
Taylor expanded in e around 0
Applied rewrites49.2%
Taylor expanded in e around 0
Applied rewrites49.8%
Applied rewrites49.8%
Final simplification49.8%
(FPCore (e v) :precision binary64 (* e v))
/* Herbie lowest-order alternative: e * v. */
double code(double e, double v) {
    return v * e;
}
! Herbie lowest-order alternative: e * v.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * v
end function
// Herbie lowest-order alternative: e * v.
public static double code(double e, double v) {
return e * v;
}
def code(e, v):
    # Herbie lowest-order alternative: e * v.
    return v * e
# Herbie lowest-order alternative: e * v.
function code(e, v) return Float64(e * v) end
% Herbie lowest-order alternative: e * v.
function tmp = code(e, v) tmp = e * v; end
(* Herbie lowest-order alternative: e v at machine precision. *)
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
lower-/.f64N/A
lower-*.f64N/A
lower-+.f6450.9
Applied rewrites50.9%
Taylor expanded in e around 0
Applied rewrites49.2%
herbie shell --seed 2024226
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))