
; Input expression: (1/6) * sqrt(-2 ln u1) * cos(2 pi u2) + 0.5 in binary64
; (Box-Muller-style normal sample, scaled by 1/6 and centered at 0.5).
(FPCore (u1 u2) :precision binary64 (+ (* (* (/ 1.0 6.0) (pow (* -2.0 (log u1)) 0.5)) (cos (* (* 2.0 PI) u2))) 0.5))
/* Box-Muller-style sample: (1/6) * sqrt(-2 ln u1) * cos(2 pi u2) + 0.5.
 * u1, u2 are expected in [0, 1] (per the FPCore precondition); for u1 == 0
 * the log is -infinity and the result is non-finite. */
double code(double u1, double u2) {
    double radius = pow(-2.0 * log(u1), 0.5);
    double angle = 2.0 * ((double) M_PI) * u2;
    return (1.0 / 6.0) * radius * cos(angle) + 0.5;
}
/** Box-Muller-style sample: (1/6) * sqrt(-2 ln u1) * cos(2 pi u2) + 0.5. */
public static double code(double u1, double u2) {
    final double scale = 1.0 / 6.0;
    final double radial = Math.pow(-2.0 * Math.log(u1), 0.5);
    final double phase = 2.0 * Math.PI * u2;
    return scale * radial * Math.cos(phase) + 0.5;
}
def code(u1, u2):
    """Box-Muller-style sample: (1/6) * sqrt(-2 ln u1) * cos(2 pi u2) + 0.5.

    Raises ValueError (from math.log) when u1 <= 0.
    """
    radial = math.pow(-2.0 * math.log(u1), 0.5)
    phase = 2.0 * math.pi * u2
    return (1.0 / 6.0) * radial * math.cos(phase) + 0.5
# Input expression; the Float64(...) wrappers make each intermediate rounding explicit.
function code(u1, u2) return Float64(Float64(Float64(Float64(1.0 / 6.0) * (Float64(-2.0 * log(u1)) ^ 0.5)) * cos(Float64(Float64(2.0 * pi) * u2))) + 0.5) end
% Input expression: (1/6) * (-2*log(u1))^0.5 * cos(2*pi*u2) + 0.5.
function tmp = code(u1, u2) tmp = (((1.0 / 6.0) * ((-2.0 * log(u1)) ^ 0.5)) * cos(((2.0 * pi) * u2))) + 0.5; end
(* Input expression; every subexpression is rounded to $MachinePrecision via N. *)
code[u1_, u2_] := N[(N[(N[(N[(1.0 / 6.0), $MachinePrecision] * N[Power[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(2.0 * Pi), $MachinePrecision] * u2), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{6} \cdot {\left(-2 \cdot \log u1\right)}^{0.5}\right) \cdot \cos \left(\left(2 \cdot \pi\right) \cdot u2\right) + 0.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (identical to the input expression).
(FPCore (u1 u2) :precision binary64 (+ (* (* (/ 1.0 6.0) (pow (* -2.0 (log u1)) 0.5)) (cos (* (* 2.0 PI) u2))) 0.5))
/* Alternative 1: the input expression unchanged.
 * For u1 == 0, log(u1) is -infinity and the result is non-finite. */
double code(double u1, double u2) {
return (((1.0 / 6.0) * pow((-2.0 * log(u1)), 0.5)) * cos(((2.0 * ((double) M_PI)) * u2))) + 0.5;
}
/** Alternative 1: (1/6) * sqrt(-2 ln u1) * cos(2 pi u2) + 0.5 (input expression unchanged). */
public static double code(double u1, double u2) {
return (((1.0 / 6.0) * Math.pow((-2.0 * Math.log(u1)), 0.5)) * Math.cos(((2.0 * Math.PI) * u2))) + 0.5;
}
# Alternative 1: input expression unchanged; math.log raises ValueError for u1 <= 0.
def code(u1, u2): return (((1.0 / 6.0) * math.pow((-2.0 * math.log(u1)), 0.5)) * math.cos(((2.0 * math.pi) * u2))) + 0.5
# Alternative 1: input expression with explicit Float64 roundings.
function code(u1, u2) return Float64(Float64(Float64(Float64(1.0 / 6.0) * (Float64(-2.0 * log(u1)) ^ 0.5)) * cos(Float64(Float64(2.0 * pi) * u2))) + 0.5) end
% Alternative 1: input expression unchanged.
function tmp = code(u1, u2) tmp = (((1.0 / 6.0) * ((-2.0 * log(u1)) ^ 0.5)) * cos(((2.0 * pi) * u2))) + 0.5; end
(* Alternative 1: input expression unchanged, with $MachinePrecision rounding at each step. *)
code[u1_, u2_] := N[(N[(N[(N[(1.0 / 6.0), $MachinePrecision] * N[Power[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(2.0 * Pi), $MachinePrecision] * u2), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{6} \cdot {\left(-2 \cdot \log u1\right)}^{0.5}\right) \cdot \cos \left(\left(2 \cdot \pi\right) \cdot u2\right) + 0.5
\end{array}
; Alternative 2: rewritten with fma; pow(x, 0.5) -> sqrt, 1/6 folded to a literal,
; and the cos argument negated (cos is even, so the value is unchanged).
(FPCore (u1 u2) :precision binary64 (fma (* (* 0.16666666666666666 (sqrt 2.0)) (cos (* (* PI u2) -2.0))) (sqrt (- (log u1))) 0.5))
/* Alternative 2: fma form; sqrt(2)*sqrt(-log u1) replaces pow(-2 log u1, 0.5). */
double code(double u1, double u2) {
return fma(((0.16666666666666666 * sqrt(2.0)) * cos(((((double) M_PI) * u2) * -2.0))), sqrt(-log(u1)), 0.5);
}
# Alternative 2: fma form with split sqrt factors and explicit Float64 roundings.
function code(u1, u2) return fma(Float64(Float64(0.16666666666666666 * sqrt(2.0)) * cos(Float64(Float64(pi * u2) * -2.0))), sqrt(Float64(-log(u1))), 0.5) end
(* Alternative 2: fused multiply-add expressed as a*b + c under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(N[(0.16666666666666666 * N[Sqrt[2.0], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(Pi * u2), $MachinePrecision] * -2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sqrt[(-N[Log[u1], $MachinePrecision])], $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(0.16666666666666666 \cdot \sqrt{2}\right) \cdot \cos \left(\left(\pi \cdot u2\right) \cdot -2\right), \sqrt{-\log u1}, 0.5\right)
\end{array}
Initial program 99.4%
Applied rewrites 99.6%
; Alternative 3: same fma form as alternative 2 with the sqrt factors regrouped.
(FPCore (u1 u2) :precision binary64 (fma (* (sqrt (- (log u1))) 0.16666666666666666) (* (sqrt 2.0) (cos (* (* PI u2) -2.0))) 0.5))
/* Alternative 3: fma with sqrt(-log u1)/6 and sqrt(2)*cos grouped as the two factors. */
double code(double u1, double u2) {
return fma((sqrt(-log(u1)) * 0.16666666666666666), (sqrt(2.0) * cos(((((double) M_PI) * u2) * -2.0))), 0.5);
}
# Alternative 3: regrouped fma form with explicit Float64 roundings.
function code(u1, u2) return fma(Float64(sqrt(Float64(-log(u1))) * 0.16666666666666666), Float64(sqrt(2.0) * cos(Float64(Float64(pi * u2) * -2.0))), 0.5) end
(* Alternative 3: regrouped fma form under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(N[Sqrt[(-N[Log[u1], $MachinePrecision])], $MachinePrecision] * 0.16666666666666666), $MachinePrecision] * N[(N[Sqrt[2.0], $MachinePrecision] * N[Cos[N[(N[(Pi * u2), $MachinePrecision] * -2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\sqrt{-\log u1} \cdot 0.16666666666666666, \sqrt{2} \cdot \cos \left(\left(\pi \cdot u2\right) \cdot -2\right), 0.5\right)
\end{array}
Initial program 99.4%
Applied rewrites 99.5%
; Alternative 4: fma with the constant 1/6 as the multiplier and sqrt(-2 log u1) kept fused.
(FPCore (u1 u2) :precision binary64 (fma (* (cos (* (* PI u2) -2.0)) (sqrt (* (log u1) -2.0))) 0.16666666666666666 0.5))
/* Alternative 4: fma(cos(-2 pi u2) * sqrt(-2 log u1), 1/6, 0.5). */
double code(double u1, double u2) {
return fma((cos(((((double) M_PI) * u2) * -2.0)) * sqrt((log(u1) * -2.0))), 0.16666666666666666, 0.5);
}
# Alternative 4: fused sqrt(-2 log u1) form with explicit Float64 roundings.
function code(u1, u2) return fma(Float64(cos(Float64(Float64(pi * u2) * -2.0)) * sqrt(Float64(log(u1) * -2.0))), 0.16666666666666666, 0.5) end
(* Alternative 4: fused sqrt(-2 log u1) form under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(N[Cos[N[(N[(Pi * u2), $MachinePrecision] * -2.0), $MachinePrecision]], $MachinePrecision] * N[Sqrt[N[(N[Log[u1], $MachinePrecision] * -2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * 0.16666666666666666 + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\cos \left(\left(\pi \cdot u2\right) \cdot -2\right) \cdot \sqrt{\log u1 \cdot -2}, 0.16666666666666666, 0.5\right)
\end{array}
Initial program 99.4%
Applied rewrites 99.4%
; Alternative 5: cos(2 pi u2) replaced by its order-2 Taylor polynomial in u2 around 0
; (per the "Taylor expanded in u2 around 0" derivation step below).
(FPCore (u1 u2) :precision binary64 (fma (fma (* -0.3333333333333333 (* u2 u2)) (* (* PI PI) (sqrt 2.0)) (* (sqrt 2.0) 0.16666666666666666)) (sqrt (- (log u1))) 0.5))
/* Alternative 5: nested fma evaluating the order-2 Taylor polynomial of the
 * cos factor in u2, then scaling sqrt(-log u1). Accurate only near u2 = 0. */
double code(double u1, double u2) {
return fma(fma((-0.3333333333333333 * (u2 * u2)), ((((double) M_PI) * ((double) M_PI)) * sqrt(2.0)), (sqrt(2.0) * 0.16666666666666666)), sqrt(-log(u1)), 0.5);
}
# Alternative 5: nested-fma Taylor form (order 2 in u2) with explicit Float64 roundings.
function code(u1, u2) return fma(fma(Float64(-0.3333333333333333 * Float64(u2 * u2)), Float64(Float64(pi * pi) * sqrt(2.0)), Float64(sqrt(2.0) * 0.16666666666666666)), sqrt(Float64(-log(u1))), 0.5) end
(* Alternative 5: nested-fma Taylor form (order 2 in u2) under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(N[(-0.3333333333333333 * N[(u2 * u2), $MachinePrecision]), $MachinePrecision] * N[(N[(Pi * Pi), $MachinePrecision] * N[Sqrt[2.0], $MachinePrecision]), $MachinePrecision] + N[(N[Sqrt[2.0], $MachinePrecision] * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] * N[Sqrt[(-N[Log[u1], $MachinePrecision])], $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333 \cdot \left(u2 \cdot u2\right), \left(\pi \cdot \pi\right) \cdot \sqrt{2}, \sqrt{2} \cdot 0.16666666666666666\right), \sqrt{-\log u1}, 0.5\right)
\end{array}
Initial program 99.4%
Applied rewrites 99.6%
Taylor expanded in u2 around 0
*-commutativeN/A
lower-*.f64N/A
lift-sqrt.f6498.4
Applied rewrites 98.4%
Taylor expanded in u2 around 0
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-PI.f64N/A
lower-PI.f64N/A
lift-sqrt.f64N/A
*-commutativeN/A
lift-sqrt.f64N/A
lift-*.f6499.3
Applied rewrites 99.3%
; Alternative 6: cos factor dropped (Taylor truncation at u2 = 0); u2 is unused.
(FPCore (u1 u2) :precision binary64 (fma (* (sqrt 2.0) 0.16666666666666666) (sqrt (- (log u1))) 0.5))
/* Alternative 6: fma(sqrt(2)/6, sqrt(-log u1), 0.5); u2 is ignored. */
double code(double u1, double u2) {
return fma((sqrt(2.0) * 0.16666666666666666), sqrt(-log(u1)), 0.5);
}
# Alternative 6: u2-free fma form with explicit Float64 roundings.
function code(u1, u2) return fma(Float64(sqrt(2.0) * 0.16666666666666666), sqrt(Float64(-log(u1))), 0.5) end
(* Alternative 6: u2-free form under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(N[Sqrt[2.0], $MachinePrecision] * 0.16666666666666666), $MachinePrecision] * N[Sqrt[(-N[Log[u1], $MachinePrecision])], $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\sqrt{2} \cdot 0.16666666666666666, \sqrt{-\log u1}, 0.5\right)
\end{array}
Initial program 99.4%
Applied rewrites 99.6%
Taylor expanded in u2 around 0
*-commutativeN/A
lower-*.f64N/A
lift-sqrt.f6498.4
Applied rewrites 98.4%
; Alternative 7: like alternative 6 but with sqrt(2) kept as an explicit factor; u2 is unused.
(FPCore (u1 u2) :precision binary64 (fma (* (sqrt (- (log u1))) (sqrt 2.0)) 0.16666666666666666 0.5))
/* Alternative 7: fma(sqrt(-log u1) * sqrt(2), 1/6, 0.5); u2 is ignored. */
double code(double u1, double u2) {
return fma((sqrt(-log(u1)) * sqrt(2.0)), 0.16666666666666666, 0.5);
}
# Alternative 7: u2-free fma form, sqrt factors split, with explicit Float64 roundings.
function code(u1, u2) return fma(Float64(sqrt(Float64(-log(u1))) * sqrt(2.0)), 0.16666666666666666, 0.5) end
(* Alternative 7: u2-free form, sqrt factors split, under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(N[Sqrt[(-N[Log[u1], $MachinePrecision])], $MachinePrecision] * N[Sqrt[2.0], $MachinePrecision]), $MachinePrecision] * 0.16666666666666666 + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\sqrt{-\log u1} \cdot \sqrt{2}, 0.16666666666666666, 0.5\right)
\end{array}
Initial program 99.4%
Taylor expanded in u2 around 0
metadata-evalN/A
+-commutativeN/A
*-commutativeN/A
sqrt-unprodN/A
*-commutativeN/A
unpow1/2N/A
lower-fma.f64N/A
unpow1/2N/A
lower-sqrt.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-log.f64N/A
metadata-eval98.3
Applied rewrites 98.3%
lift-*.f64N/A
lift-log.f64N/A
lower-sqrt.f64N/A
sqrt-prodN/A
metadata-evalN/A
sqrt-unprodN/A
associate-*r*N/A
sqrt-unprodN/A
*-commutativeN/A
mul-1-negN/A
neg-logN/A
lower-*.f64N/A
neg-logN/A
lower-sqrt.f64N/A
lift-log.f64N/A
lift-neg.f64N/A
lift-sqrt.f6498.3
Applied rewrites 98.3%
; Alternative 8: same u2-free form with the constants grouped differently.
(FPCore (u1 u2) :precision binary64 (fma (* 0.16666666666666666 (sqrt (- (log u1)))) (sqrt 2.0) 0.5))
/* Alternative 8: fma(sqrt(-log u1)/6, sqrt(2), 0.5); u2 is ignored. */
double code(double u1, double u2) {
return fma((0.16666666666666666 * sqrt(-log(u1))), sqrt(2.0), 0.5);
}
# Alternative 8: u2-free fma form, constants regrouped, with explicit Float64 roundings.
function code(u1, u2) return fma(Float64(0.16666666666666666 * sqrt(Float64(-log(u1)))), sqrt(2.0), 0.5) end
(* Alternative 8: u2-free form, constants regrouped, under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[(0.16666666666666666 * N[Sqrt[(-N[Log[u1], $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * N[Sqrt[2.0], $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.16666666666666666 \cdot \sqrt{-\log u1}, \sqrt{2}, 0.5\right)
\end{array}
Initial program 99.4%
Taylor expanded in u2 around 0
metadata-evalN/A
+-commutativeN/A
*-commutativeN/A
sqrt-unprodN/A
*-commutativeN/A
unpow1/2N/A
lower-fma.f64N/A
unpow1/2N/A
lower-sqrt.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-log.f64N/A
metadata-eval98.3
Applied rewrites 98.3%
lift-fma.f64N/A
lift-sqrt.f64N/A
lift-*.f64N/A
lift-log.f64N/A
+-commutativeN/A
*-commutativeN/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
metadata-evalN/A
sqrt-prodN/A
metadata-evalN/A
sqrt-unprodN/A
associate-*r*N/A
sqrt-unprodN/A
*-commutativeN/A
mul-1-negN/A
neg-logN/A
metadata-evalN/A
sqrt-unprodN/A
Applied rewrites 98.3%
; Alternative 9: sqrt(-2 log u1) / 6 + 0.5, with sqrt kept fused; u2 is unused.
(FPCore (u1 u2) :precision binary64 (fma (sqrt (* (log u1) -2.0)) 0.16666666666666666 0.5))
/* Alternative 9: fma(sqrt(-2 log u1), 1/6, 0.5); u2 is ignored. */
double code(double u1, double u2) {
return fma(sqrt((log(u1) * -2.0)), 0.16666666666666666, 0.5);
}
# Alternative 9: u2-free fused-sqrt fma form with explicit Float64 roundings.
function code(u1, u2) return fma(sqrt(Float64(log(u1) * -2.0)), 0.16666666666666666, 0.5) end
(* Alternative 9: u2-free fused-sqrt form under $MachinePrecision rounding. *)
code[u1_, u2_] := N[(N[Sqrt[N[(N[Log[u1], $MachinePrecision] * -2.0), $MachinePrecision]], $MachinePrecision] * 0.16666666666666666 + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\sqrt{\log u1 \cdot -2}, 0.16666666666666666, 0.5\right)
\end{array}
Initial program 99.4%
Taylor expanded in u2 around 0
metadata-evalN/A
+-commutativeN/A
*-commutativeN/A
sqrt-unprodN/A
*-commutativeN/A
unpow1/2N/A
lower-fma.f64N/A
unpow1/2N/A
lower-sqrt.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-log.f64N/A
metadata-eval98.3
Applied rewrites 98.3%
herbie shell --seed 2025057
; Full Herbie input: the expression above with its name and the
; precondition u1, u2 in [0, 1] stated explicitly.
(FPCore (u1 u2)
:name "normal distribution"
:precision binary64
:pre (and (and (<= 0.0 u1) (<= u1 1.0)) (and (<= 0.0 u2) (<= u2 1.0)))
(+ (* (* (/ 1.0 6.0) (pow (* -2.0 (log u1)) 0.5)) (cos (* (* 2.0 PI) u2))) 0.5))