
; Input program: log-magnitude of the complex value re + i*im, in binary64.
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Computes log(sqrt(re**2 + im**2)): the log of the magnitude of re + i*im.
! NOTE(review): re*re overflows for |re| > ~1.3d154; the hypot-based
! alternative later in this report avoids that.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
    // Natural log of the magnitude sqrt(re^2 + im^2) of re + i*im.
    // NOTE(review): re*re overflows for |re| > ~1.3e154; Math.hypot avoids that.
    final double sumOfSquares = re * re + im * im;
    return Math.log(Math.sqrt(sumOfSquares));
}
def code(re, im):
    """Return log(sqrt(re*re + im*im)): the log-magnitude of re + i*im.

    NOTE(review): re*re overflows for |re| > ~1.3e154; math.hypot avoids that.
    """
    sum_of_squares = (re * re) + (im * im)
    return math.log(math.sqrt(sum_of_squares))
function code(re, im)
    # log of the magnitude sqrt(re^2 + im^2), with every step forced to Float64
    rr = Float64(re * re)
    ii = Float64(im * im)
    return log(sqrt(Float64(rr + ii)))
end
% log of the magnitude sqrt(re^2 + im^2) of re + i*im.
% NOTE(review): re*re overflows for |re| > ~1.3e154; hypot avoids that.
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
(* log of the magnitude Sqrt[re^2 + im^2] of re + I*im, with every
   intermediate rounded to $MachinePrecision to mimic binary64. *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input program (baseline).
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Baseline alternative: log(sqrt(re**2 + im**2)), same as the input program.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
public static double code(double re, double im) {
    // Baseline: log of the Euclidean norm of (re, im).
    double reSquared = re * re;
    double imSquared = im * im;
    return Math.log(Math.sqrt(reSquared + imSquared));
}
def code(re, im):
    # Baseline: log-magnitude of the complex number re + i*im.
    return math.log(math.sqrt(re * re + im * im))
function code(re, im)
    # Baseline: Float64-evaluated log(sqrt(re^2 + im^2))
    sum_sq = Float64(Float64(re * re) + Float64(im * im))
    log(sqrt(sum_sq))
end
% Baseline alternative: log(sqrt(re^2 + im^2)), same as the input program.
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
(* Baseline alternative: log(Sqrt[re^2 + im^2]) at $MachinePrecision. *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
; Alternative 2: hypot computes sqrt(re^2 + im^2) without overflow/underflow.
(FPCore (re im) :precision binary64 (log (hypot re im)))
double code(double re, double im) {
return log(hypot(re, im));
}
public static double code(double re, double im) {
    // Math.hypot computes sqrt(re*re + im*im) without intermediate overflow.
    final double magnitude = Math.hypot(re, im);
    return Math.log(magnitude);
}
def code(re, im):
    """Log-magnitude of re + i*im; math.hypot avoids overflow in re*re + im*im."""
    magnitude = math.hypot(re, im)
    return math.log(magnitude)
function code(re, im)
    # hypot(re, im) == sqrt(re^2 + im^2) without intermediate overflow
    magnitude = hypot(re, im)
    return log(magnitude)
end
% hypot computes sqrt(re^2 + im^2) without intermediate overflow.
function tmp = code(re, im) tmp = log(hypot(re, im)); end
(* Rendered with Sqrt directly (no hypot builtin used here).
   NOTE(review): presumably safe given N[...]'s arbitrary-precision
   evaluation of the inner expression — confirm for extreme inputs. *)
code[re_, im_] := N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\mathsf{hypot}\left(re, im\right)\right)
\end{array}
Initial program: 50.4%
log-lowering-log.f64: N/A
hypot-define: N/A
hypot-lowering-hypot.f64: 100.0%
Simplified: 100.0%
; Alternative 3: Taylor expansion in im around infinity (reported accuracy 25.5%).
(FPCore (re im) :precision binary64 (log (* im (+ (* 0.5 (* re (/ (/ re im) im))) 1.0))))
/* Herbie approximation of log(sqrt(re*re + im*im)) from a Taylor expansion in
   im around infinity: log(im * (1 + 0.5*(re/im)^2)).
   NOTE(review): reported accuracy is only 25.5% — valid only when |im|
   dominates |re|, and log's argument must be positive. */
double code(double re, double im) {
return log((im * ((0.5 * (re * ((re / im) / im))) + 1.0)));
}
! Herbie approximation via Taylor expansion in im around infinity:
! log(im * (1 + 0.5*(re/im)**2)). Reported accuracy 25.5% — only
! useful when |im| >> |re|.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log((im * ((0.5d0 * (re * ((re / im) / im))) + 1.0d0)))
end function
// Herbie approximation via Taylor expansion in im around infinity:
// log(im * (1 + 0.5*(re/im)^2)). Reported accuracy 25.5% — only
// useful when |im| >> |re|.
public static double code(double re, double im) {
return Math.log((im * ((0.5 * (re * ((re / im) / im))) + 1.0)));
}
# Herbie approximation via Taylor expansion in im around infinity:
# log(im * (1 + 0.5*(re/im)**2)). Reported accuracy 25.5% — only when |im| >> |re|.
def code(re, im): return math.log((im * ((0.5 * (re * ((re / im) / im))) + 1.0)))
# Taylor expansion in im around infinity; reported accuracy 25.5% (|im| >> |re| only).
function code(re, im) return log(Float64(im * Float64(Float64(0.5 * Float64(re * Float64(Float64(re / im) / im))) + 1.0))) end
% Taylor expansion in im around infinity; reported accuracy 25.5% (|im| >> |re| only).
function tmp = code(re, im) tmp = log((im * ((0.5 * (re * ((re / im) / im))) + 1.0))); end
(* Taylor expansion in im around infinity; reported accuracy 25.5% (|im| >> |re| only). *)
code[re_, im_] := N[Log[N[(im * N[(N[(0.5 * N[(re * N[(N[(re / im), $MachinePrecision] / im), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(im \cdot \left(0.5 \cdot \left(re \cdot \frac{\frac{re}{im}}{im}\right) + 1\right)\right)
\end{array}
Initial program: 50.4%
log-lowering-log.f64: N/A
hypot-define: N/A
hypot-lowering-hypot.f64: 100.0%
Simplified: 100.0%
Taylor expanded in im around inf
*-lowering-*.f64: N/A
+-commutative: N/A
+-lowering-+.f64: N/A
*-lowering-*.f64: N/A
unpow2: N/A
associate-/l*: N/A
*-lowering-*.f64: N/A
unpow2: N/A
associate-/r*: N/A
/-lowering-/.f64: N/A
/-lowering-/.f64: 25.5%
Simplified: 25.5%
; Alternative 4: Taylor expansion in re around 0 (reported accuracy 24.2%).
(FPCore (re im) :precision binary64 (+ (* 0.5 (* re (/ (/ re im) im))) (log im)))
/* Herbie approximation from a Taylor expansion in re around 0:
   0.5*(re/im)^2 + log(im). Reported accuracy 24.2% — valid only
   when |re| << |im| and im > 0. */
double code(double re, double im) {
return (0.5 * (re * ((re / im) / im))) + log(im);
}
! Taylor expansion in re around 0: 0.5*(re/im)**2 + log(im).
! Reported accuracy 24.2% — valid only when |re| << |im| and im > 0.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = (0.5d0 * (re * ((re / im) / im))) + log(im)
end function
// Taylor expansion in re around 0: 0.5*(re/im)^2 + log(im).
// Reported accuracy 24.2% — valid only when |re| << |im| and im > 0.
public static double code(double re, double im) {
return (0.5 * (re * ((re / im) / im))) + Math.log(im);
}
# Taylor expansion in re around 0: 0.5*(re/im)**2 + log(im).
# Reported accuracy 24.2% — only when |re| << |im| and im > 0.
def code(re, im): return (0.5 * (re * ((re / im) / im))) + math.log(im)
# Taylor expansion in re around 0; reported accuracy 24.2% (|re| << |im|, im > 0).
function code(re, im) return Float64(Float64(0.5 * Float64(re * Float64(Float64(re / im) / im))) + log(im)) end
% Taylor expansion in re around 0; reported accuracy 24.2% (|re| << |im|, im > 0).
function tmp = code(re, im) tmp = (0.5 * (re * ((re / im) / im))) + log(im); end
(* Taylor expansion in re around 0; reported accuracy 24.2% (|re| << |im|, im > 0). *)
code[re_, im_] := N[(N[(0.5 * N[(re * N[(N[(re / im), $MachinePrecision] / im), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[Log[im], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left(re \cdot \frac{\frac{re}{im}}{im}\right) + \log im
\end{array}
Initial program: 50.4%
log-lowering-log.f64: N/A
hypot-define: N/A
hypot-lowering-hypot.f64: 100.0%
Simplified: 100.0%
Taylor expanded in re around 0
+-lowering-+.f64: N/A
log-lowering-log.f64: N/A
*-lowering-*.f64: N/A
unpow2: N/A
associate-/l*: N/A
*-lowering-*.f64: N/A
unpow2: N/A
associate-/r*: N/A
/-lowering-/.f64: N/A
/-lowering-/.f64: 24.2%
Simplified: 24.2%
Final simplification: 24.2%
; Alternative 5: leading Taylor term in re around 0 (reported accuracy 26.1%).
(FPCore (re im) :precision binary64 (log im))
/* Leading term of the Taylor expansion in re around 0: just log(im).
   Reported accuracy 26.1% — ignores re entirely and requires im > 0. */
double code(double re, double im) {
return log(im);
}
! Leading Taylor term in re around 0: just log(im).
! Reported accuracy 26.1% — ignores re entirely and requires im > 0.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(im)
end function
// Leading Taylor term in re around 0: just log(im).
// Reported accuracy 26.1% — ignores re entirely and requires im > 0.
public static double code(double re, double im) {
return Math.log(im);
}
# Leading Taylor term in re around 0; reported accuracy 26.1% (ignores re; im > 0).
def code(re, im): return math.log(im)
# Leading Taylor term in re around 0; reported accuracy 26.1% (ignores re; im > 0).
function code(re, im) return log(im) end
% Leading Taylor term in re around 0; reported accuracy 26.1% (ignores re; im > 0).
function tmp = code(re, im) tmp = log(im); end
(* Leading Taylor term in re around 0; reported accuracy 26.1% (ignores re; im > 0). *)
code[re_, im_] := N[Log[im], $MachinePrecision]
\begin{array}{l}
\\
\log im
\end{array}
Initial program: 50.4%
log-lowering-log.f64: N/A
hypot-define: N/A
hypot-lowering-hypot.f64: 100.0%
Simplified: 100.0%
Taylor expanded in re around 0
log-lowering-log.f64: 26.1%
Simplified: 26.1%
; Alternative 6: algebraically 0.5*(re/im)^2 (reported accuracy 3.5%).
(FPCore (re im) :precision binary64 (/ (/ 0.5 im) (/ (/ im re) re)))
/* Nested-division form of 0.5*(re/im)^2, from repeated Taylor expansion
   (in re around 0, then im around 0).
   NOTE(review): reported accuracy is only 3.5% — of narrow practical use. */
double code(double re, double im) {
return (0.5 / im) / ((im / re) / re);
}
! Nested-division form of 0.5*(re/im)**2 (Taylor in re around 0, then im
! around 0). Reported accuracy only 3.5%.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = (0.5d0 / im) / ((im / re) / re)
end function
// Nested-division form of 0.5*(re/im)^2 (Taylor in re around 0, then im
// around 0). Reported accuracy only 3.5%.
public static double code(double re, double im) {
return (0.5 / im) / ((im / re) / re);
}
# Nested-division form of 0.5*(re/im)**2; reported accuracy only 3.5%.
def code(re, im): return (0.5 / im) / ((im / re) / re)
# Nested-division form of 0.5*(re/im)^2; reported accuracy only 3.5%.
function code(re, im) return Float64(Float64(0.5 / im) / Float64(Float64(im / re) / re)) end
% Nested-division form of 0.5*(re/im)^2; reported accuracy only 3.5%.
function tmp = code(re, im) tmp = (0.5 / im) / ((im / re) / re); end
(* Nested-division form of 0.5*(re/im)^2; reported accuracy only 3.5%. *)
code[re_, im_] := N[(N[(0.5 / im), $MachinePrecision] / N[(N[(im / re), $MachinePrecision] / re), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{0.5}{im}}{\frac{\frac{im}{re}}{re}}
\end{array}
Initial program 50.4%
log-lowering-log.f64N/A
hypot-defineN/A
hypot-lowering-hypot.f64100.0%
Simplified100.0%
Taylor expanded in re around 0
+-lowering-+.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6424.2%
Simplified24.2%
Taylor expanded in im around 0
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f642.8%
Simplified2.8%
frac-2negN/A
associate-*r*N/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
times-fracN/A
distribute-frac-neg2N/A
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
distribute-frac-negN/A
/-lowering-/.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
metadata-evalN/A
neg-sub0N/A
--lowering--.f643.5%
Applied egg-rr3.5%
frac-2negN/A
sub0-negN/A
distribute-rgt-neg-inN/A
remove-double-negN/A
associate-/r/N/A
associate-/r*N/A
frac-timesN/A
sub0-negN/A
*-commutativeN/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
metadata-evalN/A
sub0-negN/A
frac-2negN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
/-lowering-/.f643.5%
Applied egg-rr3.5%
; Alternative 7: algebraically 0.5*(re/im)^2 again (reported accuracy 3.5%).
(FPCore (re im) :precision binary64 (* (/ re im) (/ (* re 0.5) im)))
/* Product-of-quotients form of 0.5*(re/im)^2.
   NOTE(review): reported accuracy is only 3.5% — of narrow practical use. */
double code(double re, double im) {
return (re / im) * ((re * 0.5) / im);
}
! Product-of-quotients form of 0.5*(re/im)**2. Reported accuracy only 3.5%.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = (re / im) * ((re * 0.5d0) / im)
end function
// Product-of-quotients form of 0.5*(re/im)^2. Reported accuracy only 3.5%.
public static double code(double re, double im) {
return (re / im) * ((re * 0.5) / im);
}
# Product-of-quotients form of 0.5*(re/im)**2; reported accuracy only 3.5%.
def code(re, im): return (re / im) * ((re * 0.5) / im)
# Product-of-quotients form of 0.5*(re/im)^2; reported accuracy only 3.5%.
function code(re, im) return Float64(Float64(re / im) * Float64(Float64(re * 0.5) / im)) end
% Product-of-quotients form of 0.5*(re/im)^2; reported accuracy only 3.5%.
function tmp = code(re, im) tmp = (re / im) * ((re * 0.5) / im); end
(* Product-of-quotients form of 0.5*(re/im)^2; reported accuracy only 3.5%. *)
code[re_, im_] := N[(N[(re / im), $MachinePrecision] * N[(N[(re * 0.5), $MachinePrecision] / im), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{re}{im} \cdot \frac{re \cdot 0.5}{im}
\end{array}
Initial program 50.4%
log-lowering-log.f64N/A
hypot-defineN/A
hypot-lowering-hypot.f64100.0%
Simplified100.0%
Taylor expanded in re around 0
+-lowering-+.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6424.2%
Simplified24.2%
Taylor expanded in im around 0
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f642.8%
Simplified2.8%
associate-*r*N/A
times-fracN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f643.5%
Applied egg-rr3.5%
Final simplification3.5%
(FPCore (re im) :precision binary64 (* re (/ 0.5 (/ im (/ re im)))))
double code(double re, double im) {
return re * (0.5 / (im / (re / im)));
}
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = re * (0.5d0 / (im / (re / im)))
end function
public static double code(double re, double im) {
return re * (0.5 / (im / (re / im)));
}
def code(re, im): return re * (0.5 / (im / (re / im)))
function code(re, im) return Float64(re * Float64(0.5 / Float64(im / Float64(re / im)))) end
function tmp = code(re, im) tmp = re * (0.5 / (im / (re / im))); end
code[re_, im_] := N[(re * N[(0.5 / N[(im / N[(re / im), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
re \cdot \frac{0.5}{\frac{im}{\frac{re}{im}}}
\end{array}
Initial program 50.4%
log-lowering-log.f64N/A
hypot-defineN/A
hypot-lowering-hypot.f64100.0%
Simplified100.0%
Taylor expanded in re around 0
+-lowering-+.f64N/A
log-lowering-log.f64N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-/r*N/A
/-lowering-/.f64N/A
/-lowering-/.f6424.2%
Simplified24.2%
Taylor expanded in im around 0
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f642.8%
Simplified2.8%
frac-2negN/A
associate-*r*N/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
times-fracN/A
distribute-frac-neg2N/A
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
distribute-frac-negN/A
/-lowering-/.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
metadata-evalN/A
neg-sub0N/A
--lowering--.f643.5%
Applied egg-rr3.5%
sub0-negN/A
associate-/l*N/A
frac-timesN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r/N/A
*-commutativeN/A
distribute-lft-neg-inN/A
metadata-evalN/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
clear-numN/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
clear-numN/A
associate-/r*N/A
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f643.5%
Applied egg-rr3.5%
Final simplification3.5%
herbie shell --seed 2024161
; Reproduction record: the original input program as given to the Herbie shell.
(FPCore (re im)
:name "math.log/1 on complex, real part"
:precision binary64
(log (sqrt (+ (* re re) (* im im)))))