
; Initial program: e*sin(v) / (1 + e*cos(v)), evaluated in binary64.
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
// Double-precision e*sin(v) / (1 + e*cos(v)) (the initial program).
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! Double-precision e*sin(v) / (1 + e*cos(v)) (the initial program).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function code
// Double-precision e*sin(v) / (1 + e*cos(v)) (the initial program).
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v):
    """Double-precision e*sin(v) / (1 + e*cos(v)) (the initial program)."""
    numerator = e * math.sin(v)
    denominator = 1.0 + e * math.cos(v)
    return numerator / denominator
# e*sin(v) / (1 + e*cos(v)); Float64() pins each intermediate to binary64.
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
% Double-precision e*sin(v) / (1 + e*cos(v)) (the initial program).
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
(* e*Sin[v] / (1 + e*Cos[v]); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 spec (identical to the initial program): e*sin(v) / (1 + e*cos(v)).
(FPCore (e v) :precision binary64 (/ (* e (sin v)) (+ 1.0 (* e (cos v)))))
// e*sin(v) / (1 + e*cos(v)) — same expression as the initial program.
double code(double e, double v) {
return (e * sin(v)) / (1.0 + (e * cos(v)));
}
! e*sin(v) / (1 + e*cos(v)) — same expression as the initial program.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = (e * sin(v)) / (1.0d0 + (e * cos(v)))
end function code
// e*sin(v) / (1 + e*cos(v)) — same expression as the initial program.
public static double code(double e, double v) {
return (e * Math.sin(v)) / (1.0 + (e * Math.cos(v)));
}
def code(e, v):
    """e*sin(v) / (1 + e*cos(v)) evaluated in double precision."""
    top = e * math.sin(v)
    bottom = 1.0 + e * math.cos(v)
    return top / bottom
# e*sin(v) / (1 + e*cos(v)); Float64() pins each intermediate to binary64.
function code(e, v) return Float64(Float64(e * sin(v)) / Float64(1.0 + Float64(e * cos(v)))) end
% e*sin(v) / (1 + e*cos(v)) — same expression as the initial program.
function tmp = code(e, v) tmp = (e * sin(v)) / (1.0 + (e * cos(v))); end
(* e*Sin[v] / (1 + e*Cos[v]); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[e_, v_] := N[(N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(e * N[Cos[v], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot \sin v}{1 + e \cdot \cos v}
\end{array}
; Alternative 2: (e / fma(e, cos(v), 1)) * sin(v) — denominator e*cos(v)+1 fused into one rounding.
(FPCore (e v) :precision binary64 (* (/ e (fma e (cos v) 1.0)) (sin v)))
// Same quotient rearranged: (e / fma(e, cos(v), 1.0)) * sin(v).
// fma computes e*cos(v) + 1.0 with a single rounding.
double code(double e, double v) {
return (e / fma(e, cos(v), 1.0)) * sin(v);
}
# (e / fma(e, cos(v), 1)) * sin(v); fma gives the denominator one rounding.
function code(e, v) return Float64(Float64(e / fma(e, cos(v), 1.0)) * sin(v)) end
(* (e / (e*Cos[v] + 1)) * Sin[v]; the fma is emulated as an unfused multiply-add under N[...]. *)
code[e_, v_] := N[(N[(e / N[(e * N[Cos[v], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * N[Sin[v], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{\mathsf{fma}\left(e, \cos v, 1\right)} \cdot \sin v
\end{array}
Initial program 99.8%
*-commutativeN/A
associate-/l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.8
Applied egg-rr99.8%
; Alternative 3 (Taylor in e): sin(v) * (e - e^2*cos(v)), inner factor fused via fma.
(FPCore (e v) :precision binary64 (* (sin v) (fma (cos v) (* e (- e)) e)))
// sin(v) * (e - e^2*cos(v)): fma(cos(v), e*(-e), e) fuses cos(v)*(-e^2) + e.
double code(double e, double v) {
return sin(v) * fma(cos(v), (e * -e), e);
}
# sin(v) * fma(cos(v), e*(-e), e) = sin(v) * (e - e^2*cos(v)).
function code(e, v) return Float64(sin(v) * fma(cos(v), Float64(e * Float64(-e)), e)) end
(* Sin[v] * (Cos[v]*(-e^2) + e); the fma is emulated as an unfused multiply-add under N[...]. *)
code[e_, v_] := N[(N[Sin[v], $MachinePrecision] * N[(N[Cos[v], $MachinePrecision] * N[(e * (-e)), $MachinePrecision] + e), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin v \cdot \mathsf{fma}\left(\cos v, e \cdot \left(-e\right), e\right)
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
associate-*r*N/A
mul-1-negN/A
distribute-rgt-neg-outN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f64N/A
Simplified99.6%
; Alternative 4 (Taylor in e, simplified): sin(v) * (e - e^2).
(FPCore (e v) :precision binary64 (* (sin v) (- e (* e e))))
// sin(v) * (e - e*e): cheaper series approximation of the initial program.
double code(double e, double v) {
return sin(v) * (e - (e * e));
}
! sin(v) * (e - e*e): Taylor-in-e approximation of e*sin(v)/(1+e*cos(v)).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = sin(v) * (e - (e * e))
end function code
// sin(v) * (e - e*e): cheaper series approximation of the initial program.
public static double code(double e, double v) {
return Math.sin(v) * (e - (e * e));
}
def code(e, v):
    """sin(v) * (e - e*e): series approximation of the initial program."""
    factor = e - e * e
    return math.sin(v) * factor
# sin(v) * (e - e*e); Float64() pins each intermediate to binary64.
function code(e, v) return Float64(sin(v) * Float64(e - Float64(e * e))) end
% sin(v) * (e - e*e): series approximation of the initial program.
function tmp = code(e, v) tmp = sin(v) * (e - (e * e)); end
(* Sin[v] * (e - e*e) with each intermediate rounded via N[..., $MachinePrecision]. *)
code[e_, v_] := N[(N[Sin[v], $MachinePrecision] * N[(e - N[(e * e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin v \cdot \left(e - e \cdot e\right)
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
associate-*r*N/A
mul-1-negN/A
distribute-rgt-neg-outN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f64N/A
Simplified99.6%
Taylor expanded in v around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6499.1
Simplified99.1%
; Alternative 5 (leading Taylor term in e): e * sin(v).
(FPCore (e v) :precision binary64 (* e (sin v)))
// e * sin(v): leading-order term of the initial program in e.
double code(double e, double v) {
return e * sin(v);
}
! e * sin(v): leading-order Taylor term of e*sin(v)/(1+e*cos(v)) in e.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * sin(v)
end function code
// e * sin(v): leading-order term of the initial program in e.
public static double code(double e, double v) {
return e * Math.sin(v);
}
def code(e, v):
    """Leading-order term e*sin(v) of the initial program."""
    s = math.sin(v)
    return e * s
# e * sin(v): leading-order term of the initial program in e.
function code(e, v) return Float64(e * sin(v)) end
% e * sin(v): leading-order term of the initial program in e.
function tmp = code(e, v) tmp = e * sin(v); end
(* e * Sin[v] at machine precision. *)
code[e_, v_] := N[(e * N[Sin[v], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \sin v
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
*-lowering-*.f64N/A
sin-lowering-sin.f6498.7
Simplified98.7%
; Alternative 6 (Taylor in v): e / ((e + fma(v^2, c(e), 1)) / v)
; where c(e) = fma(e, -0.5, fma(e, 1/6, 1/6)) approximates the v-series coefficients.
(FPCore (e v)
:precision binary64
(/
e
(/
(+
e
(fma
(* v v)
(fma e -0.5 (fma e 0.16666666666666666 0.16666666666666666))
1.0))
v)))
// e / ((e + fma(v*v, fma(e, -0.5, fma(e, 1/6, 1/6)), 1.0)) / v):
// series-in-v form; the fma chain evaluates the polynomial coefficient in e.
double code(double e, double v) {
return e / ((e + fma((v * v), fma(e, -0.5, fma(e, 0.16666666666666666, 0.16666666666666666)), 1.0)) / v);
}
# Series-in-v form: e / ((e + fma(v^2, fma(e, -0.5, fma(e, 1/6, 1/6)), 1)) / v).
function code(e, v) return Float64(e / Float64(Float64(e + fma(Float64(v * v), fma(e, -0.5, fma(e, 0.16666666666666666, 0.16666666666666666)), 1.0)) / v)) end
(* Series-in-v form; the fmas are emulated as unfused multiply-adds under N[...]. *)
code[e_, v_] := N[(e / N[(N[(e + N[(N[(v * v), $MachinePrecision] * N[(e * -0.5 + N[(e * 0.16666666666666666 + 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / v), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e}{\frac{e + \mathsf{fma}\left(v \cdot v, \mathsf{fma}\left(e, -0.5, \mathsf{fma}\left(e, 0.16666666666666666, 0.16666666666666666\right)\right), 1\right)}{v}}
\end{array}
Initial program 99.8%
associate-/l*N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.7
Applied egg-rr99.7%
Taylor expanded in v around 0
/-lowering-/.f64N/A
+-commutativeN/A
associate-+l+N/A
+-lowering-+.f64N/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
+-commutativeN/A
distribute-rgt-inN/A
metadata-evalN/A
accelerator-lowering-fma.f6455.1
Simplified55.1%
; Alternative 7: (e*v / (e^2 - 1)) * (e - 1), with e^2 - 1 fused via fma.
(FPCore (e v) :precision binary64 (* (/ (* e v) (fma e e -1.0)) (+ e -1.0)))
// ((e*v) / (e^2 - 1)) * (e - 1): fma(e, e, -1.0) computes e*e - 1 in one rounding.
double code(double e, double v) {
return ((e * v) / fma(e, e, -1.0)) * (e + -1.0);
}
# ((e*v) / fma(e, e, -1)) * (e - 1); fma gives e^2 - 1 one rounding.
function code(e, v) return Float64(Float64(Float64(e * v) / fma(e, e, -1.0)) * Float64(e + -1.0)) end
(* ((e*v) / (e*e - 1)) * (e - 1); the fma is emulated as an unfused multiply-add under N[...]. *)
code[e_, v_] := N[(N[(N[(e * v), $MachinePrecision] / N[(e * e + -1.0), $MachinePrecision]), $MachinePrecision] * N[(e + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e \cdot v}{\mathsf{fma}\left(e, e, -1\right)} \cdot \left(e + -1\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f6453.9
Simplified53.9%
+-commutativeN/A
flip-+N/A
associate-/r/N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6454.0
Applied egg-rr54.0%
; Alternative 8 (Taylor in e and v): v * (e - e^2).
(FPCore (e v) :precision binary64 (* v (- e (* e e))))
// v * (e - e*e): double-series approximation of the initial program.
double code(double e, double v) {
return v * (e - (e * e));
}
! v * (e - e*e): Taylor approximation of e*sin(v)/(1+e*cos(v)) in both e and v.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = v * (e - (e * e))
end function code
// v * (e - e*e): double-series approximation of the initial program.
public static double code(double e, double v) {
return v * (e - (e * e));
}
def code(e, v):
    """v * (e - e*e): double-series approximation of the initial program."""
    squared = e * e
    return v * (e - squared)
# v * (e - e*e); Float64() pins each intermediate to binary64.
function code(e, v) return Float64(v * Float64(e - Float64(e * e))) end
% v * (e - e*e): double-series approximation of the initial program.
function tmp = code(e, v) tmp = v * (e - (e * e)); end
(* v * (e - e*e) with each intermediate rounded via N[..., $MachinePrecision]. *)
code[e_, v_] := N[(v * N[(e - N[(e * e), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
v \cdot \left(e - e \cdot e\right)
\end{array}
Initial program 99.8%
Taylor expanded in e around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
associate-*r*N/A
mul-1-negN/A
distribute-rgt-neg-outN/A
unpow2N/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f64N/A
Simplified99.6%
Taylor expanded in v around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6499.1
Simplified99.1%
Taylor expanded in v around 0
*-lowering-*.f64N/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6454.0
Simplified54.0%
; Alternative 9: e * (v - e*v), algebraically v*(e - e^2) with different grouping.
(FPCore (e v) :precision binary64 (* e (- v (* e v))))
// e * (v - e*v): regrouped form of v*(e - e^2).
double code(double e, double v) {
return e * (v - (e * v));
}
! e * (v - e*v): regrouped Taylor approximation of e*sin(v)/(1+e*cos(v)).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * (v - (e * v))
end function code
// e * (v - e*v): regrouped form of v*(e - e^2).
public static double code(double e, double v) {
return e * (v - (e * v));
}
def code(e, v):
    """e * (v - e*v): regrouped form of v*(e - e^2)."""
    reduced = v - e * v
    return e * reduced
# e * (v - e*v); Float64() pins each intermediate to binary64.
function code(e, v) return Float64(e * Float64(v - Float64(e * v))) end
% e * (v - e*v): regrouped form of v*(e - e^2).
function tmp = code(e, v) tmp = e * (v - (e * v)); end
(* e * (v - e*v) with each intermediate rounded via N[..., $MachinePrecision]. *)
code[e_, v_] := N[(e * N[(v - N[(e * v), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e \cdot \left(v - e \cdot v\right)
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f6453.9
Simplified53.9%
Taylor expanded in e around 0
*-lowering-*.f64N/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
*-lowering-*.f6453.9
Simplified53.9%
; Alternative 10 (leading terms in both variables): e * v.
(FPCore (e v) :precision binary64 (* e v))
// e * v: coarsest product approximation of the initial program.
double code(double e, double v) {
return e * v;
}
! e * v: coarsest product approximation of e*sin(v)/(1+e*cos(v)).
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = e * v
end function code
// e * v: coarsest product approximation of the initial program.
public static double code(double e, double v) {
return e * v;
}
def code(e, v):
    """e * v: coarsest product approximation of the initial program."""
    product = e * v
    return product
# e * v in binary64.
function code(e, v) return Float64(e * v) end
% e * v: coarsest product approximation of the initial program.
function tmp = code(e, v) tmp = e * v; end
(* e * v at machine precision. *)
code[e_, v_] := N[(e * v), $MachinePrecision]
\begin{array}{l}
\\
e \cdot v
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f6453.9
Simplified53.9%
Taylor expanded in e around 0
*-lowering-*.f6453.5
Simplified53.5%
; Alternative 11 (degenerate): just v; e is ignored.
(FPCore (e v) :precision binary64 v)
// Degenerate alternative: returns v unchanged; e is intentionally unused.
double code(double e, double v) {
return v;
}
! Degenerate alternative: returns v unchanged; e is intentionally unused.
! implicit none added so no identifier can be implicitly typed.
real(8) function code(e, v)
implicit none
real(8), intent (in) :: e
real(8), intent (in) :: v
code = v
end function code
// Degenerate alternative: returns v unchanged; e is intentionally unused.
public static double code(double e, double v) {
return v;
}
def code(e, v):
    """Degenerate alternative: ignores e and returns v unchanged."""
    result = v
    return result
# Degenerate alternative: returns v unchanged; e is intentionally unused.
function code(e, v) return v end
% Degenerate alternative: returns v unchanged; e is intentionally unused.
function tmp = code(e, v) tmp = v; end
(* Degenerate alternative: returns v unchanged; e is intentionally unused. *)
code[e_, v_] := v
\begin{array}{l}
\\
v
\end{array}
Initial program 99.8%
Taylor expanded in v around 0
/-lowering-/.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f6453.9
Simplified53.9%
Taylor expanded in e around inf
Simplified4.6%
herbie shell --seed 2024205
; Original Herbie input "Trigonometry A": e*sin(v) / (1 + e*cos(v)) in binary64,
; sampled under the precondition 0 <= e <= 1.
(FPCore (e v)
:name "Trigonometry A"
:precision binary64
:pre (and (<= 0.0 e) (<= e 1.0))
(/ (* e (sin v)) (+ 1.0 (* e (cos v)))))