
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
/** Returns 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in double precision. */
public static double code(double x) {
    double recip = 1.0 / Math.hypot(1.0, x);
    return 1.0 - Math.sqrt(0.5 * (1.0 + recip));
}
def code(x):
    """Return 1 - sqrt(0.5 * (1 + 1/hypot(1, x))), evaluated in float64."""
    recip = 1.0 / math.hypot(1.0, x)
    return 1.0 - math.sqrt(0.5 * (1.0 + recip))
# 1 - sqrt(0.5 * (1 + 1/hypot(1, x))), with each intermediate rounded to Float64.
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
% 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in double precision.
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
(* 1 - Sqrt[0.5 (1 + 1/Sqrt[1 + x^2])]; every intermediate is rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))
/* 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in binary64 (restatement of the initial program). */
double code(double x) {
return 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x)))));
}
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 * (1.0 + (1.0 / Math.hypot(1.0, x)))));
}
# 1 - sqrt(0.5 * (1 + 1/hypot(1, x))) in float64 (restatement of the initial program).
def code(x): return 1.0 - math.sqrt((0.5 * (1.0 + (1.0 / math.hypot(1.0, x)))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 * Float64(1.0 + Float64(1.0 / hypot(1.0, x)))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 * (1.0 + (1.0 / hypot(1.0, x))))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 * N[(1.0 + N[(1.0 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 \cdot \left(1 + \frac{1}{\mathsf{hypot}\left(1, x\right)}\right)}
\end{array}
(FPCore (x) :precision binary64 (/ (- 0.5 (sqrt (/ 0.25 (fma x x 1.0)))) (+ 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x)))))))
double code(double x) {
return (0.5 - sqrt((0.25 / fma(x, x, 1.0)))) / (1.0 + sqrt((0.5 + (0.5 / hypot(1.0, x)))));
}
function code(x) return Float64(Float64(0.5 - sqrt(Float64(0.25 / fma(x, x, 1.0)))) / Float64(1.0 + sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x)))))) end
code[x_] := N[(N[(0.5 - N[Sqrt[N[(0.25 / N[(x * x + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 - \sqrt{\frac{0.25}{\mathsf{fma}\left(x, x, 1\right)}}}{1 + \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
flip--98.2%
metadata-eval98.2%
add-sqr-sqrt99.6%
associate--r+99.7%
metadata-eval99.7%
Applied egg-rr99.7%
add-sqr-sqrt99.6%
sqrt-unprod99.7%
frac-times99.7%
metadata-eval99.7%
hypot-undefine99.7%
hypot-undefine99.7%
rem-square-sqrt99.7%
metadata-eval99.7%
pow299.7%
Applied egg-rr99.7%
+-commutative99.7%
Simplified99.7%
unpow299.7%
fma-define99.7%
Applied egg-rr99.7%
(FPCore (x) :precision binary64 (let* ((t_0 (/ 0.5 (hypot 1.0 x)))) (/ (- 0.5 t_0) (+ 1.0 (sqrt (+ 0.5 t_0))))))
double code(double x) {
double t_0 = 0.5 / hypot(1.0, x);
return (0.5 - t_0) / (1.0 + sqrt((0.5 + t_0)));
}
// With t_0 = 0.5/hypot(1, x): returns (0.5 - t_0) / (1 + sqrt(0.5 + t_0)).
public static double code(double x) {
double t_0 = 0.5 / Math.hypot(1.0, x);
return (0.5 - t_0) / (1.0 + Math.sqrt((0.5 + t_0)));
}
def code(x):
    """Return (0.5 - t_0) / (1 + sqrt(0.5 + t_0)) with t_0 = 0.5/hypot(1, x).

    Fixed: the generated one-line body placed the assignment and the
    return on a single line with no separator, which is a SyntaxError.
    """
    t_0 = 0.5 / math.hypot(1.0, x)
    return (0.5 - t_0) / (1.0 + math.sqrt(0.5 + t_0))
# (0.5 - t_0) / (1 + sqrt(0.5 + t_0)) with t_0 = 0.5/hypot(1, x), all in Float64.
# Fixed: the generated one-liner put two statements on one line with no
# `;` or newline between them, which Julia rejects as a parse error.
function code(x)
    t_0 = Float64(0.5 / hypot(1.0, x))
    return Float64(Float64(0.5 - t_0) / Float64(1.0 + sqrt(Float64(0.5 + t_0))))
end
% With t_0 = 0.5/hypot(1, x): tmp = (0.5 - t_0) / (1 + sqrt(0.5 + t_0)).
function tmp = code(x) t_0 = 0.5 / hypot(1.0, x); tmp = (0.5 - t_0) / (1.0 + sqrt((0.5 + t_0))); end
code[x_] := Block[{t$95$0 = N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]}, N[(N[(0.5 - t$95$0), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(0.5 + t$95$0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\\
\frac{0.5 - t_0}{1 + \sqrt{0.5 + t_0}}
\end{array}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
flip--98.2%
metadata-eval98.2%
add-sqr-sqrt99.6%
associate--r+99.7%
metadata-eval99.7%
Applied egg-rr99.7%
(FPCore (x) :precision binary64 (- 1.0 (pow (pow (+ 0.5 (/ 0.5 (hypot 1.0 x))) 1.5) 0.3333333333333333)))
double code(double x) {
return 1.0 - pow(pow((0.5 + (0.5 / hypot(1.0, x))), 1.5), 0.3333333333333333);
}
public static double code(double x) {
return 1.0 - Math.pow(Math.pow((0.5 + (0.5 / Math.hypot(1.0, x))), 1.5), 0.3333333333333333);
}
# 1 - ((0.5 + 0.5/hypot(1, x)) ** 1.5) ** 0.3333333333333333 in float64.
def code(x): return 1.0 - math.pow(math.pow((0.5 + (0.5 / math.hypot(1.0, x))), 1.5), 0.3333333333333333)
function code(x) return Float64(1.0 - ((Float64(0.5 + Float64(0.5 / hypot(1.0, x))) ^ 1.5) ^ 0.3333333333333333)) end
function tmp = code(x) tmp = 1.0 - (((0.5 + (0.5 / hypot(1.0, x))) ^ 1.5) ^ 0.3333333333333333); end
code[x_] := N[(1.0 - N[Power[N[Power[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1.5], $MachinePrecision], 0.3333333333333333], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - {\left({\left(0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}\right)}^{1.5}\right)}^{0.3333333333333333}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
pow1/298.2%
metadata-eval98.2%
metadata-eval98.2%
pow-pow98.2%
metadata-eval98.2%
Applied egg-rr98.2%
(FPCore (x) :precision binary64 (- 1.0 (sqrt (+ 0.5 (/ 0.5 (hypot 1.0 x))))))
double code(double x) {
return 1.0 - sqrt((0.5 + (0.5 / hypot(1.0, x))));
}
public static double code(double x) {
return 1.0 - Math.sqrt((0.5 + (0.5 / Math.hypot(1.0, x))));
}
# 1 - sqrt(0.5 + 0.5/hypot(1, x)) in float64.
def code(x): return 1.0 - math.sqrt((0.5 + (0.5 / math.hypot(1.0, x))))
function code(x) return Float64(1.0 - sqrt(Float64(0.5 + Float64(0.5 / hypot(1.0, x))))) end
function tmp = code(x) tmp = 1.0 - sqrt((0.5 + (0.5 / hypot(1.0, x)))); end
code[x_] := N[(1.0 - N[Sqrt[N[(0.5 + N[(0.5 / N[Sqrt[1.0 ^ 2 + x ^ 2], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5 + \frac{0.5}{\mathsf{hypot}\left(1, x\right)}}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
(FPCore (x) :precision binary64 (/ (- 0.5 (/ 0.5 x)) (+ 1.0 (sqrt (+ 0.5 (/ 0.5 x))))))
double code(double x) {
return (0.5 - (0.5 / x)) / (1.0 + sqrt((0.5 + (0.5 / x))));
}
! (0.5 - 0.5/x) / (1 + sqrt(0.5 + 0.5/x)) in double precision (real(8)).
real(8) function code(x)
real(8), intent (in) :: x
code = (0.5d0 - (0.5d0 / x)) / (1.0d0 + sqrt((0.5d0 + (0.5d0 / x))))
end function
public static double code(double x) {
return (0.5 - (0.5 / x)) / (1.0 + Math.sqrt((0.5 + (0.5 / x))));
}
# (0.5 - 0.5/x) / (1 + sqrt(0.5 + 0.5/x)) — large-x form, hypot dropped.
def code(x): return (0.5 - (0.5 / x)) / (1.0 + math.sqrt((0.5 + (0.5 / x))))
function code(x) return Float64(Float64(0.5 - Float64(0.5 / x)) / Float64(1.0 + sqrt(Float64(0.5 + Float64(0.5 / x))))) end
function tmp = code(x) tmp = (0.5 - (0.5 / x)) / (1.0 + sqrt((0.5 + (0.5 / x)))); end
code[x_] := N[(N[(0.5 - N[(0.5 / x), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Sqrt[N[(0.5 + N[(0.5 / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 - \frac{0.5}{x}}{1 + \sqrt{0.5 + \frac{0.5}{x}}}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
Taylor expanded in x around inf 95.1%
flip--95.1%
metadata-eval95.1%
add-sqr-sqrt96.6%
associate--r+96.6%
metadata-eval96.6%
Applied egg-rr96.6%
(FPCore (x) :precision binary64 (/ (- 0.5 (/ 0.5 x)) (+ 1.0 (sqrt 0.5))))
/* (0.5 - 0.5/x) / (1 + sqrt(0.5)); the denominator has collapsed to a constant. */
double code(double x) {
    double num = 0.5 - 0.5 / x;
    return num / (1.0 + sqrt(0.5));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (0.5d0 - (0.5d0 / x)) / (1.0d0 + sqrt(0.5d0))
end function
public static double code(double x) {
return (0.5 - (0.5 / x)) / (1.0 + Math.sqrt(0.5));
}
# (0.5 - 0.5/x) / (1 + sqrt(0.5)); denominator collapsed to a constant.
def code(x): return (0.5 - (0.5 / x)) / (1.0 + math.sqrt(0.5))
function code(x) return Float64(Float64(0.5 - Float64(0.5 / x)) / Float64(1.0 + sqrt(0.5))) end
function tmp = code(x) tmp = (0.5 - (0.5 / x)) / (1.0 + sqrt(0.5)); end
code[x_] := N[(N[(0.5 - N[(0.5 / x), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 - \frac{0.5}{x}}{1 + \sqrt{0.5}}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
Taylor expanded in x around inf 95.1%
flip--95.1%
metadata-eval95.1%
add-sqr-sqrt96.6%
associate--r+96.6%
metadata-eval96.6%
Applied egg-rr96.6%
Taylor expanded in x around inf 96.1%
(FPCore (x) :precision binary64 (/ 0.5 (+ 1.0 (sqrt 0.5))))
/* Constant approximation 0.5 / (1 + sqrt(0.5)); x no longer affects the result. */
double code(double x) {
    (void)x; /* unused in this approximation */
    return 0.5 / (1.0 + sqrt(0.5));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.5d0 / (1.0d0 + sqrt(0.5d0))
end function
public static double code(double x) {
return 0.5 / (1.0 + Math.sqrt(0.5));
}
# Constant approximation 0.5 / (1 + sqrt(0.5)); x is unused.
def code(x): return 0.5 / (1.0 + math.sqrt(0.5))
function code(x) return Float64(0.5 / Float64(1.0 + sqrt(0.5))) end
function tmp = code(x) tmp = 0.5 / (1.0 + sqrt(0.5)); end
code[x_] := N[(0.5 / N[(1.0 + N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{1 + \sqrt{0.5}}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
flip--98.2%
metadata-eval98.2%
add-sqr-sqrt99.6%
associate--r+99.7%
metadata-eval99.7%
Applied egg-rr99.7%
Taylor expanded in x around inf 96.2%
(FPCore (x) :precision binary64 (- 1.0 (sqrt 0.5)))
/* Constant approximation 1 - sqrt(0.5); x no longer affects the result. */
double code(double x) {
    (void)x; /* unused in this approximation */
    return 1.0 - sqrt(0.5);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - sqrt(0.5d0)
end function
public static double code(double x) {
return 1.0 - Math.sqrt(0.5);
}
# Constant approximation 1 - sqrt(0.5); x is unused.
def code(x): return 1.0 - math.sqrt(0.5)
function code(x) return Float64(1.0 - sqrt(0.5)) end
function tmp = code(x) tmp = 1.0 - sqrt(0.5); end
code[x_] := N[(1.0 - N[Sqrt[0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt{0.5}
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
Taylor expanded in x around inf 94.7%
(FPCore (x) :precision binary64 0.25)
/* Constant approximation 0.25; x is ignored. */
double code(double x) {
return 0.25;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.25d0
end function
public static double code(double x) {
return 0.25;
}
# Constant approximation 0.25; x is unused.
def code(x): return 0.25
function code(x) return 0.25 end
function tmp = code(x) tmp = 0.25; end
code[x_] := 0.25
\begin{array}{l}
\\
0.25
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
flip--98.2%
metadata-eval98.2%
add-sqr-sqrt99.6%
associate--r+99.7%
metadata-eval99.7%
Applied egg-rr99.7%
Taylor expanded in x around 0 23.1%
Taylor expanded in x around inf 22.6%
Final simplification22.6%
(FPCore (x) :precision binary64 0.0)
/* Degenerate constant approximation 0.0; x is ignored. */
double code(double x) {
return 0.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0
end function
public static double code(double x) {
return 0.0;
}
# Degenerate constant approximation 0.0; x is unused.
def code(x): return 0.0
function code(x) return 0.0 end
function tmp = code(x) tmp = 0.0; end
code[x_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 98.2%
distribute-lft-in98.2%
metadata-eval98.2%
associate-*r/98.2%
metadata-eval98.2%
Simplified98.2%
Taylor expanded in x around 0 3.1%
Final simplification3.1%
herbie shell --seed 2024096
(FPCore (x)
:name "Given's Rotation SVD example, simplified"
:precision binary64
(- 1.0 (sqrt (* 0.5 (+ 1.0 (/ 1.0 (hypot 1.0 x)))))))