
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
!> Natural log of the complex magnitude: log(sqrt(re**2 + im**2)).
!! Adds implicit none and a named end; the computation is unchanged.
real(8) function code(re, im)
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function code
// Natural log of the complex magnitude sqrt(re^2 + im^2).
public static double code(double re, double im) {
    final double sumOfSquares = (re * re) + (im * im);
    return Math.log(Math.sqrt(sumOfSquares));
}
def code(re, im):
    """Natural log of the complex magnitude sqrt(re**2 + im**2)."""
    mag_squared = (re * re) + (im * im)
    return math.log(math.sqrt(mag_squared))
# Natural log of the complex magnitude, with explicit Float64 rounding steps.
function code(re, im)
    s = Float64(Float64(re * re) + Float64(im * im))
    return log(sqrt(s))
end
% Natural log of the complex magnitude sqrt(re^2 + im^2).
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
(* log(sqrt(re^2 + im^2)) with each step rounded to $MachinePrecision. *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
/* Alternative 1: repeated listing of the initial program, log(sqrt(re^2 + im^2)). */
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Alternative 1: repeated listing of the initial program, log(sqrt(re**2 + im**2)).
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
// Alternative 1: repeated listing of the initial program, log(sqrt(re^2 + im^2)).
public static double code(double re, double im) {
return Math.log(Math.sqrt(((re * re) + (im * im))));
}
# Alternative 1: repeated listing of the initial program, log(sqrt(re**2 + im**2)).
def code(re, im): return math.log(math.sqrt(((re * re) + (im * im))))
# Alternative 1: repeated listing of the initial program, log(sqrt(re^2 + im^2)).
function code(re, im) return log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) end
% Alternative 1: repeated listing of the initial program, log(sqrt(re^2 + im^2)).
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
(* Alternative 1: repeated listing of the initial program, log(sqrt(re^2 + im^2)). *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
(FPCore (re im) :precision binary64 (log (hypot re im)))
double code(double re, double im) {
return log(hypot(re, im));
}
// log|re + i*im| via Math.hypot (sqrt(re^2 + im^2) without intermediate overflow).
public static double code(double re, double im) {
    final double magnitude = Math.hypot(re, im);
    return Math.log(magnitude);
}
def code(re, im):
    """log|re + i*im| via math.hypot (sqrt(re*re + im*im) without overflow)."""
    magnitude = math.hypot(re, im)
    return math.log(magnitude)
# log|re + i*im| via hypot.
function code(re, im)
    return log(hypot(re, im))
end
% log|re + i*im| via hypot (sqrt(re^2 + im^2) without intermediate overflow).
function tmp = code(re, im) tmp = log(hypot(re, im)); end
(* hypot alternative; Mathematica has no hypot, so it is rendered as Sqrt[re^2 + im^2]. *)
code[re_, im_] := N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\mathsf{hypot}\left(re, im\right)\right)
\end{array}
Initial program 53.0%
accelerator-lowering-hypot.f64 100.0
Applied egg-rr 100.0%
(FPCore (re im) :precision binary64 (log (fma re (* 0.5 (/ re im)) im)))
/* log(im + re^2/(2*im)) via fma: the report's Taylor expansion of
   log(sqrt(re^2 + im^2)) in re around 0 — accurate only when |re| << |im|. */
double code(double re, double im) {
return log(fma(re, (0.5 * (re / im)), im));
}
# log(im + re^2/(2*im)) via fma: Taylor expansion in re around 0 (|re| << |im|).
function code(re, im) return log(fma(re, Float64(0.5 * Float64(re / im)), im)) end
(* fma alternative rendered as an unfused multiply-add: log(re*(0.5*(re/im)) + im). *)
code[re_, im_] := N[Log[N[(re * N[(0.5 * N[(re / im), $MachinePrecision]), $MachinePrecision] + im), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\mathsf{fma}\left(re, 0.5 \cdot \frac{re}{im}, im\right)\right)
\end{array}
Initial program 53.0%
Taylor expanded in re around 0
+-commutative N/A
*-lft-identity N/A
associate-*l/ N/A
associate-*l* N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
accelerator-lowering-fma.f64 N/A
associate-*l* N/A
/-rgt-identity N/A
times-frac N/A
*-lft-identity N/A
*-rgt-identity N/A
*-lowering-*.f64 N/A
/-lowering-/.f64 25.2
Simplified 25.2%
(FPCore (re im) :precision binary64 (log im))
double code(double re, double im) {
return log(im);
}
!> Degenerate approximation log(im) (Taylor expansion in re around 0);
!! re is intentionally unused. Adds implicit none and a named end.
real(8) function code(re, im)
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(im)
end function code
// Degenerate approximation log(im); re is intentionally ignored.
public static double code(double re, double im) {
    final double result = Math.log(im);
    return result;
}
def code(re, im):
    """Degenerate approximation log(im); re is intentionally ignored."""
    return math.log(im)
# Degenerate approximation log(im); re is intentionally ignored.
function code(re, im)
    return log(im)
end
% Degenerate approximation log(im); re is intentionally ignored.
function tmp = code(re, im) tmp = log(im); end
(* Degenerate approximation log(im); re is intentionally ignored. *)
code[re_, im_] := N[Log[im], $MachinePrecision]
\begin{array}{l}
\\
\log im
\end{array}
Initial program 53.0%
Taylor expanded in re around 0
log-lowering-log.f64 25.6
Simplified 25.6%
(FPCore (re im) :precision binary64 (/ 0.0 0.0))
/* Always returns IEEE quiet NaN via 0.0/0.0 — the report's "give up"
   alternative. Both parameters are intentionally unused; compilers may
   warn about the constant division. */
double code(double re, double im) {
return 0.0 / 0.0;
}
!> Always returns a quiet NaN — the report's "give up" alternative.
!! The literal 0.0d0/0.0d0 is a constant division by zero that common
!! Fortran compilers reject at compile time, so the NaN is produced via
!! the intrinsic ieee_arithmetic module instead. Arguments are unused.
real(8) function code(re, im)
use, intrinsic :: ieee_arithmetic, only: ieee_value, ieee_quiet_nan
implicit none
real(8), intent (in) :: re
real(8), intent (in) :: im
code = ieee_value(0.0d0, ieee_quiet_nan)
end function code
// Always returns Double.NaN (0.0/0.0 is NaN in Java) — the "give up" alternative.
public static double code(double re, double im) {
return 0.0 / 0.0;
}
def code(re, im):
    """Always return NaN — the report's "give up" alternative.

    The generated literal ``0.0 / 0.0`` raises ZeroDivisionError in Python
    rather than yielding IEEE NaN as the FPCore ``(/ 0.0 0.0)`` intends,
    so the NaN is constructed explicitly. Both arguments are unused.
    """
    return float("nan")
# Always returns NaN (Float64 0.0/0.0) — the "give up" alternative.
function code(re, im) return Float64(0.0 / 0.0) end
% Always returns NaN (0.0/0.0) — the "give up" alternative.
function tmp = code(re, im) tmp = 0.0 / 0.0; end
(* "Give up" alternative; NOTE(review): 0.0/0.0 yields Indeterminate (with a
   Power::infy/indet message) in Mathematica rather than an IEEE NaN — confirm. *)
code[re_, im_] := N[(0.0 / 0.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{0}{0}
\end{array}
Initial program 53.0%
pow1/2 N/A
pow-to-exp N/A
rem-log-exp N/A
*-lowering-*.f64 N/A
log-lowering-log.f64 N/A
accelerator-lowering-fma.f64 N/A
*-lowering-*.f64 53.0
Applied egg-rr 53.0%
Taylor expanded in re around 0
log-lowering-log.f64 N/A
unpow2 N/A
*-lowering-*.f64 30.7
Simplified 30.7%
log-prod N/A
flip-+ N/A
+-inverses N/A
+-inverses N/A
associate-*l/ N/A
+-inverses N/A
metadata-eval N/A
+-inverses N/A
/-lowering-/.f64 N/A
+-inverses N/A
+-inverses 0.0
Applied egg-rr 0.0%
herbie shell --seed 2024194
(FPCore (re im)
:name "math.log/1 on complex, real part"
:precision binary64
(log (sqrt (+ (* re re) (* im im)))))