
(FPCore (x) :precision binary64 (- (/ PI 2.0) (* 2.0 (asin (sqrt (/ (- 1.0 x) 2.0))))))
/* Evaluates pi/2 - 2*asin(sqrt((1 - x)/2)) in binary64.
 * Algebraically equal to asin(x) on [-1, 1] (see the :alt form below). */
double code(double x) {
	double half_pi = ((double) M_PI) / 2.0;
	double arg = (1.0 - x) / 2.0;
	return half_pi - 2.0 * asin(sqrt(arg));
}
/** Evaluates PI/2 - 2*asin(sqrt((1 - x)/2)); algebraically equal to asin(x) on [-1, 1]. */
public static double code(double x) {
	double halfPi = Math.PI / 2.0;
	double t = (1.0 - x) / 2.0;
	return halfPi - 2.0 * Math.asin(Math.sqrt(t));
}
def code(x):
    """Return pi/2 - 2*asin(sqrt((1 - x)/2)); algebraically equal to asin(x) on [-1, 1]."""
    half = (1.0 - x) / 2.0
    return math.pi / 2.0 - 2.0 * math.asin(math.sqrt(half))
# pi/2 - 2*asin(sqrt((1 - x)/2)); each Float64(...) wrapper rounds the
# intermediate to binary64, matching the FPCore :precision binary64 spec.
function code(x)
	num = Float64(1.0 - x)
	inner = Float64(num / 2.0)
	return Float64(Float64(pi / 2.0) - Float64(2.0 * asin(sqrt(inner))))
end
% Computes pi/2 - 2*asin(sqrt((1 - x)/2)); algebraically equal to asin(x) on [-1, 1].
function tmp = code(x) tmp = (pi / 2.0) - (2.0 * asin(sqrt(((1.0 - x) / 2.0)))); end
(* Machine-precision evaluation of Pi/2 - 2*ArcSin[Sqrt[(1 - x)/2]]; each subexpression is rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[x_] := N[(N[(Pi / 2.0), $MachinePrecision] - N[(2.0 * N[ArcSin[N[Sqrt[N[(N[(1.0 - x), $MachinePrecision] / 2.0), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\pi}{2} - 2 \cdot \sin^{-1} \left(\sqrt{\frac{1 - x}{2}}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (/ PI 2.0) (* 2.0 (asin (sqrt (/ (- 1.0 x) 2.0))))))
/* pi/2 - 2*asin(sqrt((1 - x)/2)) in binary64; algebraically equal to asin(x) on [-1, 1]. */
double code(double x) {
	double q = (1.0 - x) / 2.0;
	return ((double) M_PI) / 2.0 - 2.0 * asin(sqrt(q));
}
/** PI/2 - 2*asin(sqrt((1 - x)/2)); algebraically equal to asin(x) on [-1, 1]. */
public static double code(double x) {
	double s = Math.sqrt((1.0 - x) / 2.0);
	return Math.PI / 2.0 - 2.0 * Math.asin(s);
}
def code(x):
    """pi/2 - 2*asin(sqrt((1 - x)/2)); algebraically equal to asin(x) on [-1, 1]."""
    root = math.sqrt((1.0 - x) / 2.0)
    return math.pi / 2.0 - 2.0 * math.asin(root)
# pi/2 - 2*asin(sqrt((1 - x)/2)); Float64(...) wrappers round each intermediate to binary64.
function code(x) return Float64(Float64(pi / 2.0) - Float64(2.0 * asin(sqrt(Float64(Float64(1.0 - x) / 2.0))))) end
% pi/2 - 2*asin(sqrt((1 - x)/2)); algebraically equal to asin(x) on [-1, 1].
function tmp = code(x) tmp = (pi / 2.0) - (2.0 * asin(sqrt(((1.0 - x) / 2.0)))); end
(* Pi/2 - 2*ArcSin[Sqrt[(1 - x)/2]] with every subexpression rounded to $MachinePrecision, mimicking binary64 evaluation. *)
code[x_] := N[(N[(Pi / 2.0), $MachinePrecision] - N[(2.0 * N[ArcSin[N[Sqrt[N[(N[(1.0 - x), $MachinePrecision] / 2.0), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\pi}{2} - 2 \cdot \sin^{-1} \left(\sqrt{\frac{1 - x}{2}}\right)
\end{array}
(FPCore (x) :precision binary64 (fma 2.0 (acos (sqrt (fma -0.5 x 0.5))) (* -0.5 PI)))
/* fma-based rewrite: 2*acos(sqrt(fma(-0.5, x, 0.5))) - pi/2.
 * Algebraically equal to asin(x) on [-1, 1]; fma rounds (-0.5*x + 0.5) once. */
double code(double x) {
	double root = sqrt(fma(-0.5, x, 0.5));
	double shift = -0.5 * ((double) M_PI);
	return fma(2.0, acos(root), shift);
}
# fma-based rewrite: 2*acos(sqrt(fma(-0.5, x, 0.5))) - pi/2; fma rounds (-0.5*x + 0.5) once.
function code(x) return fma(2.0, acos(sqrt(fma(-0.5, x, 0.5))), Float64(-0.5 * pi)) end
(* 2*ArcCos[Sqrt[-0.5 x + 0.5]] - Pi/2 at machine precision; Mathematica rendering of the fma alternative. *)
code[x_] := N[(2.0 * N[ArcCos[N[Sqrt[N[(-0.5 * x + 0.5), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] + N[(-0.5 * Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2, \cos^{-1} \left(\sqrt{\mathsf{fma}\left(-0.5, x, 0.5\right)}\right), -0.5 \cdot \pi\right)
\end{array}
Initial program 6.4%
lift-asin.f64 — N/A
asin-acos — N/A
lift-PI.f64 — N/A
lift-/.f64 — N/A
sub-neg — N/A
lift-/.f64 — N/A
div-inv — N/A
metadata-eval — N/A
lower-fma.f64 — N/A
lower-neg.f64 — N/A
lower-acos.f64 — 7.8
lift-/.f64 — N/A
lift--.f64 — N/A
div-sub — N/A
metadata-eval — N/A
sub-neg — N/A
+-commutative — N/A
div-inv — N/A
metadata-eval — N/A
distribute-rgt-neg-in — N/A
metadata-eval — N/A
metadata-eval — N/A
metadata-eval — N/A
lower-fma.f64 — N/A
Applied rewrites — 7.8%
Taylor expanded in x around 0
cancel-sign-sub-inv — N/A
metadata-eval — N/A
sub-neg — N/A
metadata-eval — N/A
cancel-sign-sub-inv — N/A
distribute-lft-in — N/A
associate-+r+ — N/A
distribute-rgt1-in — N/A
metadata-eval — N/A
neg-mul-1 — N/A
+-commutative — N/A
Applied rewrites — 7.8%
Final simplification — 7.8%
(FPCore (x) :precision binary64 (fma 2.0 (acos (sqrt 0.5)) (* -0.5 PI)))
/* Constant fold of the Taylor expansion around x = 0:
 * 2*acos(sqrt(0.5)) - pi/2 (~= 0). The argument x is ignored. */
double code(double x) {
	double theta = acos(sqrt(0.5));
	return fma(2.0, theta, -0.5 * ((double) M_PI));
}
# Constant fold of the Taylor expansion around x = 0: 2*acos(sqrt(0.5)) - pi/2 (~0); x is ignored.
function code(x) return fma(2.0, acos(sqrt(0.5)), Float64(-0.5 * pi)) end
(* Constant fold of the Taylor expansion around x = 0: 2*ArcCos[Sqrt[0.5]] - Pi/2 (~0); x is ignored. *)
code[x_] := N[(2.0 * N[ArcCos[N[Sqrt[0.5], $MachinePrecision]], $MachinePrecision] + N[(-0.5 * Pi), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2, \cos^{-1} \left(\sqrt{0.5}\right), -0.5 \cdot \pi\right)
\end{array}
Initial program 6.7%
lift-asin.f64 — N/A
asin-acos — N/A
lift-PI.f64 — N/A
lift-/.f64 — N/A
sub-neg — N/A
lift-/.f64 — N/A
div-inv — N/A
metadata-eval — N/A
lower-fma.f64 — N/A
lower-neg.f64 — N/A
lower-acos.f64 — 8.2
lift-/.f64 — N/A
lift--.f64 — N/A
div-sub — N/A
metadata-eval — N/A
sub-neg — N/A
+-commutative — N/A
div-inv — N/A
metadata-eval — N/A
distribute-rgt-neg-in — N/A
metadata-eval — N/A
metadata-eval — N/A
metadata-eval — N/A
lower-fma.f64 — N/A
Applied rewrites — 8.2%
Taylor expanded in x around 0
cancel-sign-sub-inv — N/A
metadata-eval — N/A
sub-neg — N/A
metadata-eval — N/A
cancel-sign-sub-inv — N/A
distribute-lft-in — N/A
associate-+r+ — N/A
distribute-rgt1-in — N/A
metadata-eval — N/A
neg-mul-1 — N/A
+-commutative — N/A
Applied rewrites — 8.2%
Taylor expanded in x around 0
Applied rewrites — 5.4%
Final simplification — 5.4%
(FPCore (x) :precision binary64 (asin x))
double code(double x) {
return asin(x);
}
! Reference form: returns the intrinsic arcsine of x (double precision).
real(8) function code(x)
real(8), intent (in) :: x
code = asin(x)
end function
/** Reference form: delegates to the library arcsine. */
public static double code(double x) {
	double result = Math.asin(x);
	return result;
}
def code(x):
    """Reference form: the library arcsine of x."""
    return math.asin(x)
# Reference form: the library arcsine of x.
function code(x) return asin(x) end
% Reference form: the library arcsine of x.
function tmp = code(x) tmp = asin(x); end
(* Reference form: machine-precision ArcSin of x. *)
code[x_] := N[ArcSin[x], $MachinePrecision]
\begin{array}{l}
\\
\sin^{-1} x
\end{array}
herbie shell --seed 2024226
(FPCore (x)
:name "Ian Simplification"
:precision binary64
:alt
(! :herbie-platform default (asin x))
(- (/ PI 2.0) (* 2.0 (asin (sqrt (/ (- 1.0 x) 2.0))))))