UniformSampleCone, z

Percentage Accurate: 99.9% → 99.9%
Time: 4.6s
Alternatives: 5
Speedup: 1.0×

Specification

?
\[\left(\left(2.328306437 \cdot 10^{-10} \leq ux \land ux \leq 1\right) \land \left(2.328306437 \cdot 10^{-10} \leq uy \land uy \leq 1\right)\right) \land \left(0 \leq maxCos \land maxCos \leq 1\right)\]
\[\begin{array}{l} \\ \left(1 - ux\right) + ux \cdot maxCos \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 (+ (- 1.0 ux) (* ux maxCos)))
/* Initial program: (1 - ux) + ux*maxCos, evaluated in binary32.
   uy is unused but kept so every variant shares the same signature. */
float code(float ux, float uy, float maxCos) {
	float one_minus_ux = 1.0f - ux;
	float scaled = ux * maxCos;
	return one_minus_ux + scaled;
}
! Initial program: (1 - ux) + ux*maxcos in single precision; uy is unused.
real(4) function code(ux, uy, maxcos)
    real(4), intent (in) :: ux
    real(4), intent (in) :: uy
    real(4), intent (in) :: maxcos
    code = (1.0e0 - ux) + (ux * maxcos)
end function
# Initial program: (1 - ux) + ux*maxCos with explicit Float32 rounding per step; uy is unused.
function code(ux, uy, maxCos)
	left = Float32(Float32(1.0) - ux)
	right = Float32(ux * maxCos)
	return Float32(left + right)
end
% Initial program: (1 - ux) + ux*maxCos in single precision; uy is unused.
function tmp = code(ux, uy, maxCos)
	tmp = (single(1.0) - ux) + (ux * maxCos);
end
\begin{array}{l}

\\
\left(1 - ux\right) + ux \cdot maxCos
\end{array}

Sampling outcomes in binary32 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 5 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(1 - ux\right) + ux \cdot maxCos \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 (+ (- 1.0 ux) (* ux maxCos)))
/* Initial program: (1 - ux) + ux*maxCos, evaluated in binary32.
   uy is unused but kept so every variant shares the same signature. */
float code(float ux, float uy, float maxCos) {
	float one_minus_ux = 1.0f - ux;
	float scaled = ux * maxCos;
	return one_minus_ux + scaled;
}
! Initial program: (1 - ux) + ux*maxcos in single precision; uy is unused.
real(4) function code(ux, uy, maxcos)
    real(4), intent (in) :: ux
    real(4), intent (in) :: uy
    real(4), intent (in) :: maxcos
    code = (1.0e0 - ux) + (ux * maxcos)
end function
# Initial program: (1 - ux) + ux*maxCos with explicit Float32 rounding per step; uy is unused.
function code(ux, uy, maxCos)
	left = Float32(Float32(1.0) - ux)
	right = Float32(ux * maxCos)
	return Float32(left + right)
end
% Initial program: (1 - ux) + ux*maxCos in single precision; uy is unused.
function tmp = code(ux, uy, maxCos)
	tmp = (single(1.0) - ux) + (ux * maxCos);
end
\begin{array}{l}

\\
\left(1 - ux\right) + ux \cdot maxCos
\end{array}

Alternative 1: 99.9% accurate, 0.0× speedup?

\[\begin{array}{l} \\ e^{\mathsf{log1p}\left(maxCos \cdot ux - ux\right)} \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 (exp (log1p (- (* maxCos ux) ux))))
float code(float ux, float uy, float maxCos) {
	return expf(log1pf(((maxCos * ux) - ux)));
}
# Alternative 1: exp(log1p(maxCos*ux - ux)), equal to (1 - ux) + ux*maxCos in exact arithmetic.
function code(ux, uy, maxCos)
	diff = Float32(Float32(maxCos * ux) - ux)
	return exp(log1p(diff))
end
\begin{array}{l}

\\
e^{\mathsf{log1p}\left(maxCos \cdot ux - ux\right)}
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(1 - ux\right) + ux \cdot maxCos \]
  2. Step-by-step derivation
    1. add-exp-log99.8%

      \[\leadsto \color{blue}{e^{\log \left(\left(1 - ux\right) + ux \cdot maxCos\right)}} \]
    2. sub-neg99.8%

      \[\leadsto e^{\log \left(\color{blue}{\left(1 + \left(-ux\right)\right)} + ux \cdot maxCos\right)} \]
    3. associate-+l+99.9%

      \[\leadsto e^{\log \color{blue}{\left(1 + \left(\left(-ux\right) + ux \cdot maxCos\right)\right)}} \]
    4. neg-mul-199.9%

      \[\leadsto e^{\log \left(1 + \left(\color{blue}{-1 \cdot ux} + ux \cdot maxCos\right)\right)} \]
    5. *-commutative99.9%

      \[\leadsto e^{\log \left(1 + \left(-1 \cdot ux + \color{blue}{maxCos \cdot ux}\right)\right)} \]
    6. distribute-rgt-in99.9%

      \[\leadsto e^{\log \left(1 + \color{blue}{ux \cdot \left(-1 + maxCos\right)}\right)} \]
    7. +-commutative99.9%

      \[\leadsto e^{\log \left(1 + ux \cdot \color{blue}{\left(maxCos + -1\right)}\right)} \]
    8. log1p-udef99.9%

      \[\leadsto e^{\color{blue}{\mathsf{log1p}\left(ux \cdot \left(maxCos + -1\right)\right)}} \]
    9. distribute-rgt-in99.9%

      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{maxCos \cdot ux + -1 \cdot ux}\right)} \]
    10. *-commutative99.9%

      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{ux \cdot maxCos} + -1 \cdot ux\right)} \]
    11. neg-mul-199.9%

      \[\leadsto e^{\mathsf{log1p}\left(ux \cdot maxCos + \color{blue}{\left(-ux\right)}\right)} \]
    12. fma-def99.9%

      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{\mathsf{fma}\left(ux, maxCos, -ux\right)}\right)} \]
  3. Applied egg-rr99.9%

    \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\mathsf{fma}\left(ux, maxCos, -ux\right)\right)}} \]
  4. Step-by-step derivation
    1. fma-neg99.9%

      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{ux \cdot maxCos - ux}\right)} \]
    2. *-commutative99.9%

      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{maxCos \cdot ux} - ux\right)} \]
  5. Simplified99.9%

    \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(maxCos \cdot ux - ux\right)}} \]
  6. Final simplification99.9%

    \[\leadsto e^{\mathsf{log1p}\left(maxCos \cdot ux - ux\right)} \]

Alternative 2: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ maxCos \cdot ux + \left(1 - ux\right) \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 (+ (* maxCos ux) (- 1.0 ux)))
/* Alternative 2: maxCos*ux + (1 - ux) — same terms as the initial program,
   summed in the opposite order. uy is unused. */
float code(float ux, float uy, float maxCos) {
	float prod = maxCos * ux;
	float complement = 1.0f - ux;
	return prod + complement;
}
! Alternative 2: maxcos*ux + (1 - ux) in single precision; uy is unused.
real(4) function code(ux, uy, maxcos)
    real(4), intent (in) :: ux
    real(4), intent (in) :: uy
    real(4), intent (in) :: maxcos
    code = (maxcos * ux) + (1.0e0 - ux)
end function
# Alternative 2: maxCos*ux + (1 - ux) with explicit Float32 rounding per step; uy is unused.
function code(ux, uy, maxCos)
	prod = Float32(maxCos * ux)
	complement = Float32(Float32(1.0) - ux)
	return Float32(prod + complement)
end
% Alternative 2: maxCos*ux + (1 - ux) in single precision; uy is unused.
function tmp = code(ux, uy, maxCos)
	tmp = (maxCos * ux) + (single(1.0) - ux);
end
\begin{array}{l}

\\
maxCos \cdot ux + \left(1 - ux\right)
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(1 - ux\right) + ux \cdot maxCos \]
  2. Final simplification99.9%

    \[\leadsto maxCos \cdot ux + \left(1 - ux\right) \]

Alternative 3: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ 1 + ux \cdot \left(maxCos + -1\right) \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 (+ 1.0 (* ux (+ maxCos -1.0))))
/* Alternative 3: 1 + ux*(maxCos - 1), the factored form of the initial
   program. uy is unused. */
float code(float ux, float uy, float maxCos) {
	float slope = maxCos + -1.0f;
	return 1.0f + (ux * slope);
}
! Alternative 3: 1 + ux*(maxcos - 1) in single precision; uy is unused.
real(4) function code(ux, uy, maxcos)
    real(4), intent (in) :: ux
    real(4), intent (in) :: uy
    real(4), intent (in) :: maxcos
    code = 1.0e0 + (ux * (maxcos + (-1.0e0)))
end function
# Alternative 3: 1 + ux*(maxCos - 1) with explicit Float32 rounding per step; uy is unused.
function code(ux, uy, maxCos)
	slope = Float32(maxCos + Float32(-1.0))
	return Float32(Float32(1.0) + Float32(ux * slope))
end
% Alternative 3: 1 + ux*(maxCos - 1) in single precision; uy is unused.
function tmp = code(ux, uy, maxCos)
	tmp = single(1.0) + (ux * (maxCos + single(-1.0)));
end
\begin{array}{l}

\\
1 + ux \cdot \left(maxCos + -1\right)
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(1 - ux\right) + ux \cdot maxCos \]
  2. Step-by-step derivation
    1. associate-+l-99.9%

      \[\leadsto \color{blue}{1 - \left(ux - ux \cdot maxCos\right)} \]
    2. *-un-lft-identity99.9%

      \[\leadsto 1 - \left(\color{blue}{1 \cdot ux} - ux \cdot maxCos\right) \]
    3. *-commutative99.9%

      \[\leadsto 1 - \left(1 \cdot ux - \color{blue}{maxCos \cdot ux}\right) \]
    4. distribute-rgt-out--99.9%

      \[\leadsto 1 - \color{blue}{ux \cdot \left(1 - maxCos\right)} \]
  3. Applied egg-rr99.9%

    \[\leadsto \color{blue}{1 - ux \cdot \left(1 - maxCos\right)} \]
  4. Final simplification99.9%

    \[\leadsto 1 + ux \cdot \left(maxCos + -1\right) \]

Alternative 4: 98.1% accurate, 2.3× speedup?

\[\begin{array}{l} \\ 1 - ux \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 (- 1.0 ux))
/* Alternative 4: Taylor expansion in maxCos around 0 — drops the ux*maxCos
   term entirely. uy and maxCos are unused. */
float code(float ux, float uy, float maxCos) {
	const float result = 1.0f - ux;
	return result;
}
! Alternative 4: 1 - ux in single precision; uy and maxcos are unused.
real(4) function code(ux, uy, maxcos)
    real(4), intent (in) :: ux
    real(4), intent (in) :: uy
    real(4), intent (in) :: maxcos
    code = 1.0e0 - ux
end function
# Alternative 4: 1 - ux in Float32; uy and maxCos are unused.
function code(ux, uy, maxCos)
	result = Float32(Float32(1.0) - ux)
	return result
end
% Alternative 4: 1 - ux in single precision; uy and maxCos are unused.
function tmp = code(ux, uy, maxCos)
	tmp = single(1.0) - ux;
end
\begin{array}{l}

\\
1 - ux
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(1 - ux\right) + ux \cdot maxCos \]
  2. Taylor expanded in maxCos around 0 98.0%

    \[\leadsto \color{blue}{1 - ux} \]
  3. Final simplification98.0%

    \[\leadsto 1 - ux \]

Alternative 5: 71.8% accurate, 7.0× speedup?

\[\begin{array}{l} \\ 1 \end{array} \]
(FPCore (ux uy maxCos) :precision binary32 1.0)
/* Alternative 5: Taylor expansion in ux around 0 — the whole expression
   collapses to the constant 1. All parameters are unused. */
float code(float ux, float uy, float maxCos) {
	const float constant_term = 1.0f;
	return constant_term;
}
! Alternative 5: constant 1 in single precision; all parameters are unused.
real(4) function code(ux, uy, maxcos)
    real(4), intent (in) :: ux
    real(4), intent (in) :: uy
    real(4), intent (in) :: maxcos
    code = 1.0e0
end function
# Alternative 5: constant 1 in Float32; all parameters are unused.
function code(ux, uy, maxCos)
	constant_term = Float32(1.0)
	return constant_term
end
% Alternative 5: constant 1 in single precision; all parameters are unused.
function tmp = code(ux, uy, maxCos)
	tmp = single(1.0);
end
\begin{array}{l}

\\
1
\end{array}
Derivation
  1. Initial program 99.9%

    \[\left(1 - ux\right) + ux \cdot maxCos \]
  2. Taylor expanded in ux around 0 70.7%

    \[\leadsto \color{blue}{1} \]
  3. Final simplification70.7%

    \[\leadsto 1 \]

Reproduce

?
herbie shell --seed 2023188 
;; Reproduction spec for "UniformSampleCone, z": binary32 precision, with
;; ux and uy in [2.328306437e-10, 1] and maxCos in [0, 1].
(FPCore (ux uy maxCos)
  :name "UniformSampleCone, z"
  :precision binary32
  :pre (and (and (and (<= 2.328306437e-10 ux) (<= ux 1.0)) (and (<= 2.328306437e-10 uy) (<= uy 1.0))) (and (<= 0.0 maxCos) (<= maxCos 1.0)))
  (+ (- 1.0 ux) (* ux maxCos)))