
(FPCore (u1 u2) :precision binary64 (+ (* (* (/ 1.0 6.0) (pow (* -2.0 (log u1)) 0.5)) (cos (* (* 2.0 PI) u2))) 0.5))
/* Input program: 0.5 + (1/6) * sqrt(-2 ln u1) * cos(2 pi u2), evaluated in binary64. */
double code(double u1, double u2) {
    double radial = (1.0 / 6.0) * pow(-2.0 * log(u1), 0.5);
    double angle = (2.0 * ((double) M_PI)) * u2;
    return radial * cos(angle) + 0.5;
}
/** Input program: 0.5 + (1/6) * sqrt(-2 ln u1) * cos(2 pi u2), evaluated in binary64. */
public static double code(double u1, double u2) {
    double radial = (1.0 / 6.0) * Math.pow(-2.0 * Math.log(u1), 0.5);
    double angle = (2.0 * Math.PI) * u2;
    return radial * Math.cos(angle) + 0.5;
}
def code(u1, u2):
    """Input program: 0.5 + (1/6) * sqrt(-2 ln u1) * cos(2 pi u2), in binary64."""
    radial = (1.0 / 6.0) * math.pow(-2.0 * math.log(u1), 0.5)
    angle = (2.0 * math.pi) * u2
    return radial * math.cos(angle) + 0.5
# Input program; every Float64(...) wrapper pins an intermediate to binary64.
function code(u1, u2) return Float64(Float64(Float64(Float64(1.0 / 6.0) * (Float64(-2.0 * log(u1)) ^ 0.5)) * cos(Float64(Float64(2.0 * pi) * u2))) + 0.5) end
% Input program: 0.5 + (1/6)*sqrt(-2*log(u1))*cos(2*pi*u2).
function tmp = code(u1, u2) tmp = (((1.0 / 6.0) * ((-2.0 * log(u1)) ^ 0.5)) * cos(((2.0 * pi) * u2))) + 0.5; end
(* Input program; N[..., $MachinePrecision] rounds each step to machine precision. *)
code[u1_, u2_] := N[(N[(N[(N[(1.0 / 6.0), $MachinePrecision] * N[Power[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(2.0 * Pi), $MachinePrecision] * u2), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{6} \cdot {\left(-2 \cdot \log u1\right)}^{0.5}\right) \cdot \cos \left(\left(2 \cdot \pi\right) \cdot u2\right) + 0.5
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (u1 u2) :precision binary64 (+ (* (* (/ 1.0 6.0) (pow (* -2.0 (log u1)) 0.5)) (cos (* (* 2.0 PI) u2))) 0.5))
/* Alternative: same expression as the input program. */
double code(double u1, double u2) {
return (((1.0 / 6.0) * pow((-2.0 * log(u1)), 0.5)) * cos(((2.0 * ((double) M_PI)) * u2))) + 0.5;
}
/** Alternative: same expression as the input program. */
public static double code(double u1, double u2) {
return (((1.0 / 6.0) * Math.pow((-2.0 * Math.log(u1)), 0.5)) * Math.cos(((2.0 * Math.PI) * u2))) + 0.5;
}
# Alternative: same expression as the input program.
def code(u1, u2): return (((1.0 / 6.0) * math.pow((-2.0 * math.log(u1)), 0.5)) * math.cos(((2.0 * math.pi) * u2))) + 0.5
# Alternative: same expression as the input program.
function code(u1, u2) return Float64(Float64(Float64(Float64(1.0 / 6.0) * (Float64(-2.0 * log(u1)) ^ 0.5)) * cos(Float64(Float64(2.0 * pi) * u2))) + 0.5) end
% Alternative: same expression as the input program.
function tmp = code(u1, u2) tmp = (((1.0 / 6.0) * ((-2.0 * log(u1)) ^ 0.5)) * cos(((2.0 * pi) * u2))) + 0.5; end
(* Alternative: same expression as the input program. *)
code[u1_, u2_] := N[(N[(N[(N[(1.0 / 6.0), $MachinePrecision] * N[Power[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(2.0 * Pi), $MachinePrecision] * u2), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{6} \cdot {\left(-2 \cdot \log u1\right)}^{0.5}\right) \cdot \cos \left(\left(2 \cdot \pi\right) \cdot u2\right) + 0.5
\end{array}
(FPCore (u1 u2)
:precision binary64
(let* ((t_0 (cos (* PI u2)))
(t_1 (sin (* PI u2)))
(t_2 (sqrt (* -2.0 (log u1)))))
(+
0.5
(* (* (- t_0 t_1) 0.16666666666666666) (+ (* t_1 t_2) (* t_0 t_2))))))
/* Alternative: cos(2*pi*u2) rewritten as (cos(pi*u2) - sin(pi*u2)) * (cos(pi*u2) + sin(pi*u2))
 * (difference of squares), distributed over the sqrt factor. */
double code(double u1, double u2) {
double t_0 = cos((((double) M_PI) * u2));
double t_1 = sin((((double) M_PI) * u2));
double t_2 = sqrt((-2.0 * log(u1))); /* sqrt(-2 ln u1) */
return 0.5 + (((t_0 - t_1) * 0.16666666666666666) * ((t_1 * t_2) + (t_0 * t_2)));
}
/** Alternative: cos(2*pi*u2) rewritten as (cos - sin)*(cos + sin) of pi*u2 (difference of squares). */
public static double code(double u1, double u2) {
double t_0 = Math.cos((Math.PI * u2));
double t_1 = Math.sin((Math.PI * u2));
double t_2 = Math.sqrt((-2.0 * Math.log(u1))); // sqrt(-2 ln u1)
return 0.5 + (((t_0 - t_1) * 0.16666666666666666) * ((t_1 * t_2) + (t_0 * t_2)));
}
def code(u1, u2):
    """Alternative: cos(2*pi*u2) rewritten as (cos - sin)*(cos + sin) of pi*u2.

    The original line fused all four statements onto one line after the colon
    with no separators, which is a Python syntax error; reformatted here.
    """
    t_0 = math.cos((math.pi * u2))
    t_1 = math.sin((math.pi * u2))
    t_2 = math.sqrt((-2.0 * math.log(u1)))  # sqrt(-2 ln u1)
    return 0.5 + (((t_0 - t_1) * 0.16666666666666666) * ((t_1 * t_2) + (t_0 * t_2)))
# Alternative: cos(2*pi*u2) rewritten as (cos - sin)*(cos + sin) of pi*u2.
# The original line fused the assignments with no newlines or semicolons,
# which is a Julia syntax error; reformatted here.
function code(u1, u2)
    t_0 = cos(Float64(pi * u2))
    t_1 = sin(Float64(pi * u2))
    t_2 = sqrt(Float64(-2.0 * log(u1)))  # sqrt(-2 ln u1)
    return Float64(0.5 + Float64(Float64(Float64(t_0 - t_1) * 0.16666666666666666) * Float64(Float64(t_1 * t_2) + Float64(t_0 * t_2))))
end
% Alternative: cos(2*pi*u2) rewritten as (cos - sin)*(cos + sin) of pi*u2.
function tmp = code(u1, u2) t_0 = cos((pi * u2)); t_1 = sin((pi * u2)); t_2 = sqrt((-2.0 * log(u1))); tmp = 0.5 + (((t_0 - t_1) * 0.16666666666666666) * ((t_1 * t_2) + (t_0 * t_2))); end
(* Alternative: cos(2*pi*u2) rewritten as (cos - sin)*(cos + sin) of pi*u2. *)
code[u1_, u2_] := Block[{t$95$0 = N[Cos[N[(Pi * u2), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[Sin[N[(Pi * u2), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[Sqrt[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, N[(0.5 + N[(N[(N[(t$95$0 - t$95$1), $MachinePrecision] * 0.16666666666666666), $MachinePrecision] * N[(N[(t$95$1 * t$95$2), $MachinePrecision] + N[(t$95$0 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\pi \cdot u2\right)\\
t_1 := \sin \left(\pi \cdot u2\right)\\
t_2 := \sqrt{-2 \cdot \log u1}\\
0.5 + \left(\left(t_0 - t_1\right) \cdot 0.16666666666666666\right) \cdot \left(t_1 \cdot t_2 + t_0 \cdot t_2\right)
\end{array}
\end{array}
Initial program 99.4%
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow1/2N/A
sqrt-lowering-sqrt.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
cos-lowering-cos.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f6499.4%
Simplified99.4%
*-commutativeN/A
cos-2N/A
difference-of-squaresN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f64N/A
sin-lowering-sin.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f64N/A
Applied egg-rr99.4%
*-commutativeN/A
associate-*r*N/A
associate-*l*N/A
*-lowering-*.f64N/A
Applied egg-rr99.4%
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
+-lowering-+.f64N/A
Applied egg-rr99.4%
Final simplification99.4%
(FPCore (u1 u2) :precision binary64 (+ 0.5 (* (* 0.16666666666666666 (pow (pow (* -2.0 (log u1)) 4.0) 0.125)) (cos (* (* PI u2) 2.0)))))
/* Alternative: sqrt(x) computed as (x^4)^(1/8); cos argument regrouped as (pi*u2)*2. */
double code(double u1, double u2) {
return 0.5 + ((0.16666666666666666 * pow(pow((-2.0 * log(u1)), 4.0), 0.125)) * cos(((((double) M_PI) * u2) * 2.0)));
}
/** Alternative: sqrt(x) computed as (x^4)^(1/8); cos argument regrouped as (pi*u2)*2. */
public static double code(double u1, double u2) {
return 0.5 + ((0.16666666666666666 * Math.pow(Math.pow((-2.0 * Math.log(u1)), 4.0), 0.125)) * Math.cos(((Math.PI * u2) * 2.0)));
}
# Alternative: sqrt(x) computed as (x^4)^(1/8); cos argument regrouped as (pi*u2)*2.
def code(u1, u2): return 0.5 + ((0.16666666666666666 * math.pow(math.pow((-2.0 * math.log(u1)), 4.0), 0.125)) * math.cos(((math.pi * u2) * 2.0)))
# Alternative: sqrt(x) computed as (x^4)^(1/8); cos argument regrouped as (pi*u2)*2.
function code(u1, u2) return Float64(0.5 + Float64(Float64(0.16666666666666666 * ((Float64(-2.0 * log(u1)) ^ 4.0) ^ 0.125)) * cos(Float64(Float64(pi * u2) * 2.0)))) end
% Alternative: sqrt(x) computed as (x^4)^(1/8); cos argument regrouped as (pi*u2)*2.
function tmp = code(u1, u2) tmp = 0.5 + ((0.16666666666666666 * (((-2.0 * log(u1)) ^ 4.0) ^ 0.125)) * cos(((pi * u2) * 2.0))); end
(* Alternative: sqrt(x) computed as (x^4)^(1/8); cos argument regrouped as (pi*u2)*2. *)
code[u1_, u2_] := N[(0.5 + N[(N[(0.16666666666666666 * N[Power[N[Power[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision], 4.0], $MachinePrecision], 0.125], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(Pi * u2), $MachinePrecision] * 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + \left(0.16666666666666666 \cdot {\left({\left(-2 \cdot \log u1\right)}^{4}\right)}^{0.125}\right) \cdot \cos \left(\left(\pi \cdot u2\right) \cdot 2\right)
\end{array}
Initial program 99.4%
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow1/2N/A
sqrt-lowering-sqrt.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
cos-lowering-cos.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f6499.4%
Simplified99.4%
pow1/2N/A
metadata-evalN/A
metadata-evalN/A
pow-sqrN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
metadata-eval99.4%
Applied egg-rr99.4%
metadata-evalN/A
metadata-evalN/A
pow-sqrN/A
pow-prod-downN/A
pow-lowering-pow.f64N/A
pow2N/A
pow2N/A
pow-prod-upN/A
metadata-evalN/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
metadata-eval99.4%
Applied egg-rr99.4%
Final simplification99.4%
(FPCore (u1 u2) :precision binary64 (+ 0.5 (* (cos (* (* PI u2) 2.0)) (* 0.16666666666666666 (sqrt (* -2.0 (log u1)))))))
/* Alternative: same terms as the input, using sqrt instead of pow(..., 0.5), product reordered. */
double code(double u1, double u2) {
return 0.5 + (cos(((((double) M_PI) * u2) * 2.0)) * (0.16666666666666666 * sqrt((-2.0 * log(u1)))));
}
/** Alternative: same terms as the input, using sqrt instead of pow(..., 0.5), product reordered. */
public static double code(double u1, double u2) {
return 0.5 + (Math.cos(((Math.PI * u2) * 2.0)) * (0.16666666666666666 * Math.sqrt((-2.0 * Math.log(u1)))));
}
# Alternative: same terms as the input, using sqrt instead of pow(..., 0.5), product reordered.
def code(u1, u2): return 0.5 + (math.cos(((math.pi * u2) * 2.0)) * (0.16666666666666666 * math.sqrt((-2.0 * math.log(u1)))))
# Alternative: same terms as the input, using sqrt instead of ^0.5, product reordered.
function code(u1, u2) return Float64(0.5 + Float64(cos(Float64(Float64(pi * u2) * 2.0)) * Float64(0.16666666666666666 * sqrt(Float64(-2.0 * log(u1)))))) end
% Alternative: same terms as the input, using sqrt instead of ^0.5, product reordered.
function tmp = code(u1, u2) tmp = 0.5 + (cos(((pi * u2) * 2.0)) * (0.16666666666666666 * sqrt((-2.0 * log(u1))))); end
(* Alternative: same terms as the input, using Sqrt instead of Power[..., 0.5], product reordered. *)
code[u1_, u2_] := N[(0.5 + N[(N[Cos[N[(N[(Pi * u2), $MachinePrecision] * 2.0), $MachinePrecision]], $MachinePrecision] * N[(0.16666666666666666 * N[Sqrt[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + \cos \left(\left(\pi \cdot u2\right) \cdot 2\right) \cdot \left(0.16666666666666666 \cdot \sqrt{-2 \cdot \log u1}\right)
\end{array}
Initial program 99.4%
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow1/2N/A
sqrt-lowering-sqrt.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
cos-lowering-cos.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f6499.4%
Simplified99.4%
Final simplification99.4%
(FPCore (u1 u2) :precision binary64 (+ 0.5 (* (sqrt (log (/ 1.0 u1))) (* 0.16666666666666666 (sqrt 2.0)))))
/* Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused;
 * sqrt(-2 ln u1) split as sqrt(ln(1/u1)) * sqrt(2). */
double code(double u1, double u2) {
return 0.5 + (sqrt(log((1.0 / u1))) * (0.16666666666666666 * sqrt(2.0)));
}
! Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused.
real(8) function code(u1, u2)
real(8), intent (in) :: u1
real(8), intent (in) :: u2
    code = 0.5d0 + (sqrt(log((1.0d0 / u1))) * (0.16666666666666666d0 * sqrt(2.0d0)))
end function
/** Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused. */
public static double code(double u1, double u2) {
return 0.5 + (Math.sqrt(Math.log((1.0 / u1))) * (0.16666666666666666 * Math.sqrt(2.0)));
}
# Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused.
def code(u1, u2): return 0.5 + (math.sqrt(math.log((1.0 / u1))) * (0.16666666666666666 * math.sqrt(2.0)))
# Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused.
function code(u1, u2) return Float64(0.5 + Float64(sqrt(log(Float64(1.0 / u1))) * Float64(0.16666666666666666 * sqrt(2.0)))) end
% Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused.
function tmp = code(u1, u2) tmp = 0.5 + (sqrt(log((1.0 / u1))) * (0.16666666666666666 * sqrt(2.0))); end
(* Alternative: cos factor truncated to 1 (Taylor expansion in u2 around 0), so u2 is unused. *)
code[u1_, u2_] := N[(0.5 + N[(N[Sqrt[N[Log[N[(1.0 / u1), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] * N[(0.16666666666666666 * N[Sqrt[2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + \sqrt{\log \left(\frac{1}{u1}\right)} \cdot \left(0.16666666666666666 \cdot \sqrt{2}\right)
\end{array}
Initial program 99.4%
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow1/2N/A
sqrt-lowering-sqrt.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
cos-lowering-cos.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f6499.4%
Simplified99.4%
Taylor expanded in u2 around 0
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f640.0%
Simplified0.0%
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
sqrt-prodN/A
*-commutativeN/A
pow1/2N/A
metadata-evalN/A
pow-powN/A
pow2N/A
sqr-powN/A
associate-*l*N/A
fma-defineN/A
fma-lowering-fma.f64N/A
Applied egg-rr98.1%
Taylor expanded in u1 around inf
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
log-lowering-log.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f6498.6%
Simplified98.6%
Final simplification98.6%
(FPCore (u1 u2) :precision binary64 (+ 0.5 (* 0.16666666666666666 (sqrt (* -2.0 (log u1))))))
/* Alternative: 0.5 + (1/6)*sqrt(-2 ln u1); cos factor dropped (Taylor in u2 around 0), u2 unused. */
double code(double u1, double u2) {
return 0.5 + (0.16666666666666666 * sqrt((-2.0 * log(u1))));
}
! Alternative: 0.5 + (1/6)*sqrt(-2*log(u1)); cos factor dropped (Taylor in u2 around 0), u2 unused.
real(8) function code(u1, u2)
real(8), intent (in) :: u1
real(8), intent (in) :: u2
    code = 0.5d0 + (0.16666666666666666d0 * sqrt(((-2.0d0) * log(u1))))
end function
/** Alternative: 0.5 + (1/6)*sqrt(-2 ln u1); cos factor dropped (Taylor in u2 around 0), u2 unused. */
public static double code(double u1, double u2) {
return 0.5 + (0.16666666666666666 * Math.sqrt((-2.0 * Math.log(u1))));
}
# Alternative: 0.5 + (1/6)*sqrt(-2*log(u1)); cos factor dropped (Taylor in u2 around 0), u2 unused.
def code(u1, u2): return 0.5 + (0.16666666666666666 * math.sqrt((-2.0 * math.log(u1))))
# Alternative: 0.5 + (1/6)*sqrt(-2*log(u1)); cos factor dropped (Taylor in u2 around 0), u2 unused.
function code(u1, u2) return Float64(0.5 + Float64(0.16666666666666666 * sqrt(Float64(-2.0 * log(u1))))) end
% Alternative: 0.5 + (1/6)*sqrt(-2*log(u1)); cos factor dropped (Taylor in u2 around 0), u2 unused.
function tmp = code(u1, u2) tmp = 0.5 + (0.16666666666666666 * sqrt((-2.0 * log(u1)))); end
(* Alternative: 0.5 + (1/6)*Sqrt[-2*Log[u1]]; cos factor dropped (Taylor in u2 around 0), u2 unused. *)
code[u1_, u2_] := N[(0.5 + N[(0.16666666666666666 * N[Sqrt[N[(-2.0 * N[Log[u1], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 + 0.16666666666666666 \cdot \sqrt{-2 \cdot \log u1}
\end{array}
Initial program 99.4%
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
unpow1/2N/A
sqrt-lowering-sqrt.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
cos-lowering-cos.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
PI-lowering-PI.f6499.4%
Simplified99.4%
Taylor expanded in u2 around 0
+-lowering-+.f64N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f640.0%
Simplified0.0%
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
sqrt-prodN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
pow1/2N/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
log-lowering-log.f6498.4%
Applied egg-rr98.4%
*-commutativeN/A
*-lowering-*.f64N/A
unpow1/2N/A
unpow1N/A
metadata-evalN/A
pow-prod-upN/A
sqrt-lowering-sqrt.f64N/A
pow-prod-upN/A
metadata-evalN/A
unpow1N/A
*-lowering-*.f64N/A
log-lowering-log.f6498.4%
Applied egg-rr98.4%
Final simplification98.4%
herbie shell --seed 2024139
(FPCore (u1 u2)
:name "normal distribution"
:precision binary64
:pre (and (and (<= 0.0 u1) (<= u1 1.0)) (and (<= 0.0 u2) (<= u2 1.0)))
(+ (* (* (/ 1.0 6.0) (pow (* -2.0 (log u1)) 0.5)) (cos (* (* 2.0 PI) u2))) 0.5))