
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Computes log(sqrt(re**2 + im**2)) in double precision: the natural
! log of the magnitude of the complex number (re, im).
! NOTE(review): re*re + im*im can overflow/underflow for extreme
! inputs; the report's alternatives rescale by |im| to avoid this.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
    // log of the magnitude of the complex number (re, im).
    final double magnitudeSquared = re * re + im * im;
    return Math.log(Math.sqrt(magnitudeSquared));
}
def code(re, im):
    """Return log(sqrt(re**2 + im**2)): log-magnitude of complex (re, im)."""
    magnitude = math.sqrt(re * re + im * im)
    return math.log(magnitude)
# log(sqrt(re^2 + im^2)) with every intermediate rounded to Float64:
# the log-magnitude of the complex number (re, im).
function code(re, im) return log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) end
% Log of the magnitude of the complex number (re, im): log(sqrt(re^2 + im^2)).
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
return Math.log(Math.sqrt(((re * re) + (im * im))));
}
def code(re, im): return math.log(math.sqrt(((re * re) + (im * im))))
function code(re, im) return log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) end
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (fma (* 0.5 (/ (/ re_m im_m) im_m)) re_m (log im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* Herbie alternative: given re_m = fabs(re), im_m = fabs(im) with the
   precondition re_m < im_m (caller must sort — see assert above),
   approximates log(sqrt(re^2 + im^2)) as
   log(im_m) + 0.5 * (re_m/im_m)^2, i.e. a first-order Taylor expansion
   of log(sqrt(1 + x)) in x = (re_m/im_m)^2. Using fma fuses the
   multiply-add and avoids overflow of the naive re*re + im*im. */
double code(double re_m, double im_m) {
return fma((0.5 * ((re_m / im_m) / im_m)), re_m, log(im_m));
}
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return fma(Float64(0.5 * Float64(Float64(re_m / im_m) / im_m)), re_m, log(im_m)) end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(0.5 * N[(N[(re$95$m / im$95$m), $MachinePrecision] / im$95$m), $MachinePrecision]), $MachinePrecision] * re$95$m + N[Log[im$95$m], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\mathsf{fma}\left(0.5 \cdot \frac{\frac{re\_m}{im\_m}}{im\_m}, re\_m, \log im\_m\right)
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
lower-fma.f64 N/A
*-commutative N/A
*-commutative N/A
associate-*r* N/A
associate-/l* N/A
*-rgt-identity N/A
lower-*.f64 N/A
unpow2 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-log.f64 24.6
Applied rewrites 24.6%
Final simplification 24.6%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (log im_m))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
/* Crudest Herbie alternative: with re_m = fabs(re) < im_m = fabs(im),
   approximates log(sqrt(re^2 + im^2)) as log(im_m), dropping the
   re_m contribution entirely (zeroth-order Taylor expansion in re). */
double code(double re_m, double im_m) {
return log(im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
! Crudest Herbie alternative: with re_m = abs(re) < im_m = abs(im)
! (caller must sort the two magnitudes first), approximates
! log(sqrt(re**2 + im**2)) as log(im_m), dropping re_m entirely.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = log(im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return Math.log(im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return math.log(im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return log(im_m) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = log(im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[Log[im$95$m], $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\log im\_m
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
lower-log.f64 26.8
Applied rewrites 26.8%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (/ 1.0 (/ (/ im_m re_m) re_m)) (/ 0.5 im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return (1.0 / ((im_m / re_m) / re_m)) * (0.5 / im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = (1.0d0 / ((im_m / re_m) / re_m)) * (0.5d0 / im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return (1.0 / ((im_m / re_m) / re_m)) * (0.5 / im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return (1.0 / ((im_m / re_m) / re_m)) * (0.5 / im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(1.0 / Float64(Float64(im_m / re_m) / re_m)) * Float64(0.5 / im_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = (1.0 / ((im_m / re_m) / re_m)) * (0.5 / im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(1.0 / N[(N[(im$95$m / re$95$m), $MachinePrecision] / re$95$m), $MachinePrecision]), $MachinePrecision] * N[(0.5 / im$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\frac{1}{\frac{\frac{im\_m}{re\_m}}{re\_m}} \cdot \frac{0.5}{im\_m}
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
Applied rewrites3.3%
Applied rewrites3.3%
Final simplification3.3%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (/ 0.5 (* (/ (/ im_m re_m) re_m) im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return 0.5 / (((im_m / re_m) / re_m) * im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = 0.5d0 / (((im_m / re_m) / re_m) * im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return 0.5 / (((im_m / re_m) / re_m) * im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return 0.5 / (((im_m / re_m) / re_m) * im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(0.5 / Float64(Float64(Float64(im_m / re_m) / re_m) * im_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = 0.5 / (((im_m / re_m) / re_m) * im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(0.5 / N[(N[(N[(im$95$m / re$95$m), $MachinePrecision] / re$95$m), $MachinePrecision] * im$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\frac{0.5}{\frac{\frac{im\_m}{re\_m}}{re\_m} \cdot im\_m}
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
Applied rewrites3.3%
Applied rewrites3.3%
Final simplification3.3%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (* (/ re_m im_m) re_m) (/ 0.5 im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return ((re_m / im_m) * re_m) * (0.5 / im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = ((re_m / im_m) * re_m) * (0.5d0 / im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return ((re_m / im_m) * re_m) * (0.5 / im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return ((re_m / im_m) * re_m) * (0.5 / im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(Float64(re_m / im_m) * re_m) * Float64(0.5 / im_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = ((re_m / im_m) * re_m) * (0.5 / im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(N[(re$95$m / im$95$m), $MachinePrecision] * re$95$m), $MachinePrecision] * N[(0.5 / im$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\left(\frac{re\_m}{im\_m} \cdot re\_m\right) \cdot \frac{0.5}{im\_m}
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
Applied rewrites3.3%
Final simplification3.3%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (* (/ 0.5 im_m) re_m) (/ re_m im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return ((0.5 / im_m) * re_m) * (re_m / im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = ((0.5d0 / im_m) * re_m) * (re_m / im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return ((0.5 / im_m) * re_m) * (re_m / im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return ((0.5 / im_m) * re_m) * (re_m / im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(Float64(0.5 / im_m) * re_m) * Float64(re_m / im_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = ((0.5 / im_m) * re_m) * (re_m / im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(N[(0.5 / im$95$m), $MachinePrecision] * re$95$m), $MachinePrecision] * N[(re$95$m / im$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\left(\frac{0.5}{im\_m} \cdot re\_m\right) \cdot \frac{re\_m}{im\_m}
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (- re_m) (/ (* -0.5 re_m) (* im_m im_m))))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return -re_m * ((-0.5 * re_m) / (im_m * im_m));
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = -re_m * (((-0.5d0) * re_m) / (im_m * im_m))
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return -re_m * ((-0.5 * re_m) / (im_m * im_m));
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return -re_m * ((-0.5 * re_m) / (im_m * im_m))
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(-re_m) * Float64(Float64(-0.5 * re_m) / Float64(im_m * im_m))) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = -re_m * ((-0.5 * re_m) / (im_m * im_m));
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[((-re$95$m) * N[(N[(-0.5 * re$95$m), $MachinePrecision] / N[(im$95$m * im$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\left(-re\_m\right) \cdot \frac{-0.5 \cdot re\_m}{im\_m \cdot im\_m}
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
Applied rewrites3.3%
Applied rewrites3.0%
Final simplification3.0%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (* (* (/ -0.5 (* im_m im_m)) re_m) (- re_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return ((-0.5 / (im_m * im_m)) * re_m) * -re_m;
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = (((-0.5d0) / (im_m * im_m)) * re_m) * -re_m
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return ((-0.5 / (im_m * im_m)) * re_m) * -re_m;
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return ((-0.5 / (im_m * im_m)) * re_m) * -re_m
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(Float64(-0.5 / Float64(im_m * im_m)) * re_m) * Float64(-re_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = ((-0.5 / (im_m * im_m)) * re_m) * -re_m;
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(N[(-0.5 / N[(im$95$m * im$95$m), $MachinePrecision]), $MachinePrecision] * re$95$m), $MachinePrecision] * (-re$95$m)), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\left(\frac{-0.5}{im\_m \cdot im\_m} \cdot re\_m\right) \cdot \left(-re\_m\right)
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
Applied rewrites3.3%
Taylor expanded in re around 0
Applied rewrites3.0%
im_m = (fabs.f64 im) re_m = (fabs.f64 re) NOTE: re_m and im_m should be sorted in increasing order before calling this function. (FPCore (re_m im_m) :precision binary64 (/ (* (* 0.5 re_m) re_m) (* im_m im_m)))
im_m = fabs(im);
re_m = fabs(re);
assert(re_m < im_m);
double code(double re_m, double im_m) {
return ((0.5 * re_m) * re_m) / (im_m * im_m);
}
im_m = abs(im)
re_m = abs(re)
NOTE: re_m and im_m should be sorted in increasing order before calling this function.
real(8) function code(re_m, im_m)
real(8), intent (in) :: re_m
real(8), intent (in) :: im_m
code = ((0.5d0 * re_m) * re_m) / (im_m * im_m)
end function
im_m = Math.abs(im);
re_m = Math.abs(re);
assert re_m < im_m;
public static double code(double re_m, double im_m) {
return ((0.5 * re_m) * re_m) / (im_m * im_m);
}
im_m = math.fabs(im) re_m = math.fabs(re) [re_m, im_m] = sort([re_m, im_m]) def code(re_m, im_m): return ((0.5 * re_m) * re_m) / (im_m * im_m)
im_m = abs(im) re_m = abs(re) re_m, im_m = sort([re_m, im_m]) function code(re_m, im_m) return Float64(Float64(Float64(0.5 * re_m) * re_m) / Float64(im_m * im_m)) end
im_m = abs(im);
re_m = abs(re);
re_m, im_m = num2cell(sort([re_m, im_m])){:}
function tmp = code(re_m, im_m)
tmp = ((0.5 * re_m) * re_m) / (im_m * im_m);
end
im_m = N[Abs[im], $MachinePrecision] re_m = N[Abs[re], $MachinePrecision] NOTE: re_m and im_m should be sorted in increasing order before calling this function. code[re$95$m_, im$95$m_] := N[(N[(N[(0.5 * re$95$m), $MachinePrecision] * re$95$m), $MachinePrecision] / N[(im$95$m * im$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
im_m = \left|im\right|
\\
re_m = \left|re\right|
\\
[re_m, im_m] = \mathsf{sort}([re_m, im_m])\\
\\
\frac{\left(0.5 \cdot re\_m\right) \cdot re\_m}{im\_m \cdot im\_m}
\end{array}
Initial program 51.3%
Taylor expanded in re around 0
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
associate-*l*N/A
unpow2N/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-/l*N/A
*-rgt-identityN/A
lower-*.f64N/A
unpow2N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-log.f6424.6
Applied rewrites24.6%
Taylor expanded in re around inf
Applied rewrites3.3%
Applied rewrites2.8%
Final simplification2.8%
herbie shell --seed 2024295
(FPCore (re im)
:name "math.log/1 on complex, real part"
:precision binary64
(log (sqrt (+ (* re re) (* im im)))))