
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Logarithm of the complex magnitude: log(sqrt(re**2 + im**2)) = log(|re + i*im|).
! NOTE(review): re*re + im*im can overflow for |re| or |im| above ~1.3d154;
! the rescaled alternatives later in this report avoid that by dividing by
! the larger component first.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
/** Logarithm of the magnitude of the complex number re + i*im. */
public static double code(double re, double im) {
    double magnitudeSquared = (re * re) + (im * im);
    double magnitude = Math.sqrt(magnitudeSquared);
    return Math.log(magnitude);
}
def code(re, im):
    """Logarithm of the magnitude of the complex number re + i*im."""
    magnitude = math.sqrt(re * re + im * im)
    return math.log(magnitude)
# Logarithm of the magnitude of the complex number re + i*im,
# with every intermediate explicitly rounded to Float64.
function code(re, im)
    sum_of_squares = Float64(Float64(re * re) + Float64(im * im))
    return log(sqrt(sum_of_squares))
end
% Logarithm of the magnitude of the complex number re + i*im.
function tmp = code(re, im)
    mag_sq = (re * re) + (im * im);
    tmp = log(sqrt(mag_sq));
end
(* log(sqrt(re^2 + im^2)) with each intermediate wrapped in
   N[..., $MachinePrecision] so every step is rounded, mirroring
   binary64 evaluation of the same expression. *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
/* Reference program restated for the alternatives table:
 * log of the complex magnitude, log(sqrt(re^2 + im^2)).
 * Kept byte-identical to the sampled program; note re*re + im*im
 * overflows for components above ~1.3e154. */
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
return Math.log(Math.sqrt(((re * re) + (im * im))));
}
def code(re, im): return math.log(math.sqrt(((re * re) + (im * im))))
function code(re, im) return log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) end
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (fma (* (/ (/ re_m im_m) im_m) 0.5) re_m (log im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* First-order approximation for re_m << im_m:
 * log(sqrt(re^2+im^2)) = log(im_m) + 0.5*log1p((re_m/im_m)^2)
 *                      ≈ log(im_m) + 0.5*(re_m/im_m)^2,
 * with the correction term folded into a single fma.
 * Precondition (per the sort/assert preamble): 0 <= re_m < im_m. */
double code(double re_m, double im_m) {
return fma((((re_m / im_m) / im_m) * 0.5), re_m, log(im_m));
}
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return fma(Float64(Float64(Float64(re_m / im_m) / im_m) * 0.5), re_m, log(im_m)) end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(N[(N[(re$95$m / im$95$m), $MachinePrecision] / im$95$m), $MachinePrecision] * 0.5), $MachinePrecision] * re$95$m + N[Log[im$95$m], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\mathsf{fma}\left(\frac{\frac{re\_m}{im\_m}}{im\_m} \cdot 0.5, re\_m, \log im\_m\right)
\end{array}
Initial program 57.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
lower-fma.f64 N/A
*-commutative N/A
*-commutative N/A
associate-*r* N/A
associate-/l* N/A
*-rgt-identity N/A
lower-*.f64 N/A
unpow2 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-log.f64 22.8
Applied rewrites 22.8%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (log im_m))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* Zeroth-order (dominant-term) approximation: when re_m << im_m,
 * log(sqrt(re^2+im^2)) ≈ log(im_m).
 * Precondition (per the sort/assert preamble): 0 <= re_m < im_m. */
double code(double re_m, double im_m) {
return log(im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
! Dominant-term approximation: for re_m << im_m,
! log(sqrt(re**2 + im**2)) ~= log(im_m).
! Precondition: inputs are sorted magnitudes, 0 <= re_m < im_m.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = log(im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return Math.log(im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return math.log(im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return log(im_m) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = log(im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[Log[im$95$m], $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\log im\_m
\end{array}
Initial program 57.0%
Taylor expanded in re around 0
lower-log.f64 24.1
Applied rewrites 24.1%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (/ 0.5 im_m) (* (/ re_m im_m) re_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* Correction term 0.5*(re_m/im_m)^2 computed as (0.5/im_m)*((re_m/im_m)*re_m).
 * NOTE(review): unlike the fma alternative, the log(im_m) term is absent —
 * presumably from the "Taylor expanded in re around inf" step logged below;
 * reported accuracy drops to ~3%. */
double code(double re_m, double im_m) {
return (0.5 / im_m) * ((re_m / im_m) * re_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = (0.5d0 / im_m) * ((re_m / im_m) * re_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return (0.5 / im_m) * ((re_m / im_m) * re_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return (0.5 / im_m) * ((re_m / im_m) * re_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(0.5 / im_m) * Float64(Float64(re_m / im_m) * re_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = (0.5 / im_m) * ((re_m / im_m) * re_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(0.5 / im$95$m), $MachinePrecision] * N[(N[(re$95$m / im$95$m), $MachinePrecision] * re$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\frac{0.5}{im\_m} \cdot \left(\frac{re\_m}{im\_m} \cdot re\_m\right)
\end{array}
Initial program 57.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
lower-fma.f64 N/A
*-commutative N/A
*-commutative N/A
associate-*r* N/A
associate-/l* N/A
*-rgt-identity N/A
lower-*.f64 N/A
unpow2 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-log.f64 22.8
Applied rewrites 22.8%
Taylor expanded in re around inf
Applied rewrites 3.3%
Applied rewrites 3.3%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (* (/ 0.5 im_m) re_m) (/ re_m im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* Same value as the previous alternative — 0.5*re_m^2/im_m^2 — with a
 * different association: ((0.5/im_m)*re_m) * (re_m/im_m).
 * NOTE(review): log(im_m) term absent; reported accuracy ~3%. */
double code(double re_m, double im_m) {
return ((0.5 / im_m) * re_m) * (re_m / im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = ((0.5d0 / im_m) * re_m) * (re_m / im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return ((0.5 / im_m) * re_m) * (re_m / im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return ((0.5 / im_m) * re_m) * (re_m / im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(Float64(0.5 / im_m) * re_m) * Float64(re_m / im_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = ((0.5 / im_m) * re_m) * (re_m / im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(N[(0.5 / im$95$m), $MachinePrecision] * re$95$m), $MachinePrecision] * N[(re$95$m / im$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\left(\frac{0.5}{im\_m} \cdot re\_m\right) \cdot \frac{re\_m}{im\_m}
\end{array}
Initial program 57.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
lower-fma.f64 N/A
*-commutative N/A
*-commutative N/A
associate-*r* N/A
associate-/l* N/A
*-rgt-identity N/A
lower-*.f64 N/A
unpow2 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-log.f64 22.8
Applied rewrites 22.8%
Taylor expanded in re around inf
Applied rewrites 3.3%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (/ re_m (* im_m im_m)) (* re_m 0.5)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* 0.5*re_m^2/im_m^2 with yet another association:
 * (re_m/(im_m*im_m)) * (re_m*0.5). The im_m*im_m product reintroduces
 * possible overflow for im_m > ~1.3e154.
 * NOTE(review): log(im_m) term absent; reported accuracy ~3%. */
double code(double re_m, double im_m) {
return (re_m / (im_m * im_m)) * (re_m * 0.5);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
! 0.5*re_m**2/im_m**2 via re_m/(im_m*im_m) * (re_m*0.5); the squared
! denominator can overflow for im_m above ~1.3d154.
! NOTE(review): log(im_m) term absent; reported accuracy ~3%.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = (re_m / (im_m * im_m)) * (re_m * 0.5d0)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return (re_m / (im_m * im_m)) * (re_m * 0.5);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return (re_m / (im_m * im_m)) * (re_m * 0.5)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(re_m / Float64(im_m * im_m)) * Float64(re_m * 0.5)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = (re_m / (im_m * im_m)) * (re_m * 0.5);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(re$95$m / N[(im$95$m * im$95$m), $MachinePrecision]), $MachinePrecision] * N[(re$95$m * 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\frac{re\_m}{im\_m \cdot im\_m} \cdot \left(re\_m \cdot 0.5\right)
\end{array}
Initial program 57.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
lower-fma.f64 N/A
*-commutative N/A
*-commutative N/A
associate-*r* N/A
associate-/l* N/A
*-rgt-identity N/A
lower-*.f64 N/A
unpow2 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-log.f64 22.8
Applied rewrites 22.8%
Taylor expanded in re around inf
Applied rewrites 3.3%
Applied rewrites 3.3%
Applied rewrites 3.1%
herbie shell --seed 2024313
(FPCore (re im)
:name "math.log/1 on complex, real part"
:precision binary64
(log (sqrt (+ (* re re) (* im im)))))