
;; Input program: log(sqrt(re^2 + im^2)) in binary64 — log of the magnitude
;; of the complex number re + i*im (see the :name in the FPCore at file end).
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
// C: direct translation of the FPCore expression above.
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Fortran: same expression; real(8) corresponds to binary64 here.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
// Java: Math.log/Math.sqrt on IEEE-754 doubles.
public static double code(double re, double im) {
return Math.log(Math.sqrt(((re * re) + (im * im))));
}
# Python: floats are binary64; assumes `import math` at the use site.
def code(re, im): return math.log(math.sqrt(((re * re) + (im * im))))
# Julia: explicit Float64() coercions pin each intermediate to binary64.
function code(re, im) return log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) end
% MATLAB: doubles by default, so no coercion is needed.
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
(* Mathematica: N[..., $MachinePrecision] forces machine-double rounding at every step. *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the input program (Herbie keeps the original
;; as a baseline candidate in its alternatives table).
(FPCore (re im) :precision binary64 (log (sqrt (+ (* re re) (* im im)))))
// C translation of this alternative.
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im))));
}
! Fortran translation; real(8) corresponds to binary64.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im))))
end function
// Java translation.
public static double code(double re, double im) {
return Math.log(Math.sqrt(((re * re) + (im * im))));
}
# Python translation; assumes `import math` at the use site.
def code(re, im): return math.log(math.sqrt(((re * re) + (im * im))))
# Julia translation with explicit Float64() coercions.
function code(re, im) return log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) end
% MATLAB translation.
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))); end
(* Mathematica translation with machine-precision rounding at each step. *)
code[re_, im_] := N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\log \left(\sqrt{re \cdot re + im \cdot im}\right)
\end{array}
;; Alternative: log(hypot(re, im)). hypot computes sqrt(re^2 + im^2) without
;; intermediate overflow/underflow (per the C math-library contract), which is
;; why the log below reports 100.0% accuracy for this rewrite.
;; Note: no Fortran variant was emitted for this alternative.
(FPCore (re im) :precision binary64 (log (hypot re im)))
// C: uses hypot from <math.h>.
double code(double re, double im) {
return log(hypot(re, im));
}
// Java: Math.hypot has the same overflow-safe contract.
public static double code(double re, double im) {
return Math.log(Math.hypot(re, im));
}
# Python: math.hypot; assumes `import math` at the use site.
def code(re, im): return math.log(math.hypot(re, im))
# Julia: Base.hypot.
function code(re, im) return log(hypot(re, im)) end
% MATLAB: built-in hypot.
function tmp = code(re, im) tmp = log(hypot(re, im)); end
(* Mathematica has no hypot primitive here, so the naive Sqrt form is emitted instead. *)
code[re_, im_] := N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\log \left(\mathsf{hypot}\left(re, im\right)\right)
\end{array}
Initial program 51.1%
log-lowering-log.f64 N/A
hypot-define N/A
hypot-lowering-hypot.f64 100.0%
Simplified 100.0%
;; Alternative from the Taylor expansion in re around 0 (see derivation log
;; below): sqrt(re^2 + im^2) ~= im + re^2/(2*im), so the program becomes
;; log(im + (re*0.5)*(re/im)). Only valid in a narrow regime (small |re|);
;; Herbie's log reports ~25.1% overall accuracy for this branch.
(FPCore (re im) :precision binary64 (log (+ im (* (* re 0.5) (/ re im)))))
// C translation.
double code(double re, double im) {
return log((im + ((re * 0.5) * (re / im))));
}
! Fortran translation; 0.5d0 keeps the constant in double precision.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log((im + ((re * 0.5d0) * (re / im))))
end function
// Java translation.
public static double code(double re, double im) {
return Math.log((im + ((re * 0.5) * (re / im))));
}
# Python translation; assumes `import math` at the use site.
def code(re, im): return math.log((im + ((re * 0.5) * (re / im))))
# Julia translation with explicit Float64() coercions.
function code(re, im) return log(Float64(im + Float64(Float64(re * 0.5) * Float64(re / im)))) end
% MATLAB translation.
function tmp = code(re, im) tmp = log((im + ((re * 0.5) * (re / im)))); end
(* Mathematica translation with machine-precision rounding at each step. *)
code[re_, im_] := N[Log[N[(im + N[(N[(re * 0.5), $MachinePrecision] * N[(re / im), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\log \left(im + \left(re \cdot 0.5\right) \cdot \frac{re}{im}\right)
\end{array}
Initial program 51.1%
log-lowering-log.f64 N/A
hypot-define N/A
hypot-lowering-hypot.f64 100.0%
Simplified 100.0%
Taylor expanded in re around 0
*-commutative N/A
associate-*l/ N/A
associate-*r/ N/A
metadata-eval N/A
associate-*r/ N/A
+-lowering-+.f64 N/A
*-commutative N/A
associate-*r/ N/A
metadata-eval N/A
associate-*l/ N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 23.3%
Simplified 23.3%
associate-*r* N/A
associate-/l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
/-lowering-/.f64 25.1%
Applied egg-rr 25.1%
Final simplification 25.1%
;; Alternative: zeroth-order Taylor expansion in re around 0 drops the re term
;; entirely, leaving log(im). Derivation log below reports 25.4% accuracy —
;; cheap, but correct only when |re| << im.
(FPCore (re im) :precision binary64 (log im))
// C translation.
double code(double re, double im) {
return log(im);
}
! Fortran translation. NOTE(review): re is an unused dummy argument,
! kept so the interface matches the other variants.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(im)
end function
// Java translation.
public static double code(double re, double im) {
return Math.log(im);
}
# Python translation; assumes `import math` at the use site.
def code(re, im): return math.log(im)
# Julia translation.
function code(re, im) return log(im) end
% MATLAB translation.
function tmp = code(re, im) tmp = log(im); end
(* Mathematica translation. *)
code[re_, im_] := N[Log[im], $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\log im
\end{array}
Initial program 51.1%
log-lowering-log.f64 N/A
hypot-define N/A
hypot-lowering-hypot.f64 100.0%
Simplified 100.0%
Taylor expanded in re around 0
log-lowering-log.f64 25.4%
Simplified 25.4%
;; Alternative from a further Taylor expansion in im around 0 (see log below):
;; re * (0.5 / (im / re)), algebraically re^2/(2*im), with the log dropped by
;; the series rewrite. Herbie reports only ~3.1% accuracy — a narrow-regime
;; approximation retained for completeness.
(FPCore (re im) :precision binary64 (* re (/ 0.5 (/ im re))))
// C translation.
double code(double re, double im) {
return re * (0.5 / (im / re));
}
! Fortran translation; 0.5d0 keeps the constant in double precision.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = re * (0.5d0 / (im / re))
end function
// Java translation.
public static double code(double re, double im) {
return re * (0.5 / (im / re));
}
# Python translation.
def code(re, im): return re * (0.5 / (im / re))
# Julia translation with explicit Float64() coercions.
function code(re, im) return Float64(re * Float64(0.5 / Float64(im / re))) end
% MATLAB translation.
function tmp = code(re, im) tmp = re * (0.5 / (im / re)); end
(* Mathematica translation with machine-precision rounding at each step. *)
code[re_, im_] := N[(re * N[(0.5 / N[(im / re), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
re \cdot \frac{0.5}{\frac{im}{re}}
\end{array}
Initial program 51.1%
log-lowering-log.f64 N/A
hypot-define N/A
hypot-lowering-hypot.f64 100.0%
Simplified 100.0%
Taylor expanded in re around 0
+-lowering-+.f64 N/A
log-lowering-log.f64 N/A
unpow2 N/A
associate-/r* N/A
associate-*r/ N/A
*-commutative N/A
associate-*l/ N/A
associate-*r/ N/A
metadata-eval N/A
associate-*r/ N/A
/-lowering-/.f64 N/A
*-commutative N/A
associate-*r/ N/A
metadata-eval N/A
associate-*l/ N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 22.4%
Simplified 22.4%
Taylor expanded in im around 0
associate-*r/ N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 2.6%
Simplified 2.6%
pow2 N/A
pow-to-exp N/A
*-commutative N/A
count-2 N/A
flip-+ N/A
+-inverses N/A
metadata-eval N/A
+-inverses N/A
associate-*r/ N/A
+-inverses N/A
+-inverses N/A
flip-+ N/A
distribute-lft-in N/A
distribute-rgt-out N/A
metadata-eval N/A
pow-to-exp N/A
unpow1 N/A
associate-*r* N/A
associate-*r/ N/A
*-commutative N/A
div-inv N/A
associate-*l* N/A
Applied egg-rr 3.1%
Final simplification 3.1%
;; Alternative: (0.5/im) * (re*re) — algebraically the same re^2/(2*im) as the
;; previous variant, reassociated differently by egg-rr. Reported ~3.0%
;; accuracy; narrow-regime only.
(FPCore (re im) :precision binary64 (* (/ 0.5 im) (* re re)))
// C translation.
double code(double re, double im) {
return (0.5 / im) * (re * re);
}
! Fortran translation; 0.5d0 keeps the constant in double precision.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = (0.5d0 / im) * (re * re)
end function
// Java translation.
public static double code(double re, double im) {
return (0.5 / im) * (re * re);
}
# Python translation.
def code(re, im): return (0.5 / im) * (re * re)
# Julia translation with explicit Float64() coercions.
function code(re, im) return Float64(Float64(0.5 / im) * Float64(re * re)) end
% MATLAB translation.
function tmp = code(re, im) tmp = (0.5 / im) * (re * re); end
(* Mathematica translation with machine-precision rounding at each step. *)
code[re_, im_] := N[(N[(0.5 / im), $MachinePrecision] * N[(re * re), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{0.5}{im} \cdot \left(re \cdot re\right)
\end{array}
Initial program 51.1%
log-lowering-log.f64 N/A
hypot-define N/A
hypot-lowering-hypot.f64 100.0%
Simplified 100.0%
Taylor expanded in re around 0
+-lowering-+.f64 N/A
log-lowering-log.f64 N/A
unpow2 N/A
associate-/r* N/A
associate-*r/ N/A
*-commutative N/A
associate-*l/ N/A
associate-*r/ N/A
metadata-eval N/A
associate-*r/ N/A
/-lowering-/.f64 N/A
*-commutative N/A
associate-*r/ N/A
metadata-eval N/A
associate-*l/ N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 22.4%
Simplified 22.4%
Taylor expanded in im around 0
associate-*r/ N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 2.6%
Simplified 2.6%
pow2 N/A
pow-to-exp N/A
*-commutative N/A
count-2 N/A
flip-+ N/A
+-inverses N/A
metadata-eval N/A
+-inverses N/A
associate-*r/ N/A
+-inverses N/A
+-inverses N/A
flip-+ N/A
distribute-lft-in N/A
distribute-rgt-out N/A
metadata-eval N/A
pow-to-exp N/A
unpow1 N/A
associate-*l/ N/A
*-lowering-*.f64 N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 3.0%
Applied egg-rr 3.0%
;; Degenerate alternative: the rewrite chain collapsed the expression to
;; 0/0, i.e. a constant NaN (derivation log below reports 0.0% accuracy).
;; Kept only as a record of the search; not a usable implementation.
(FPCore (re im) :precision binary64 (/ 0.0 0.0))
// C: 0.0/0.0 evaluates to a quiet NaN under IEEE-754.
double code(double re, double im) {
return 0.0 / 0.0;
}
! Fortran translation. NOTE(review): some compilers reject a constant
! zero divide at compile time — verify before building this variant.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = 0.0d0 / 0.0d0
end function
// Java: 0.0/0.0 is Double.NaN.
public static double code(double re, double im) {
return 0.0 / 0.0;
}
# Python: 0.0/0.0 raises ZeroDivisionError rather than producing NaN.
def code(re, im): return 0.0 / 0.0
# Julia: 0.0/0.0 evaluates to NaN.
function code(re, im) return Float64(0.0 / 0.0) end
% MATLAB: 0.0/0.0 evaluates to NaN.
function tmp = code(re, im) tmp = 0.0 / 0.0; end
(* Mathematica: 0.0/0.0 produces Indeterminate with a warning. *)
code[re_, im_] := N[(0.0 / 0.0), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{0}{0}
\end{array}
Initial program 51.1%
log-lowering-log.f64 N/A
hypot-define N/A
hypot-lowering-hypot.f64 100.0%
Simplified 100.0%
pow1/2 N/A
pow-to-exp N/A
rem-log-exp N/A
*-lowering-*.f64 N/A
log-lowering-log.f64 N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 51.1%
Applied egg-rr 51.1%
Taylor expanded in re around 0
log-lowering-log.f64 N/A
unpow2 N/A
*-lowering-*.f64 25.1%
Simplified 25.1%
log-prod N/A
flip-+ N/A
+-inverses N/A
+-inverses N/A
associate-*l/ N/A
metadata-eval N/A
/-lowering-/.f64 0.0%
Applied egg-rr 0.0%
herbie shell --seed 2024139
;; Reproduction metadata: the original named FPCore, as fed to the
;; `herbie shell --seed 2024139` invocation recorded above.
(FPCore (re im)
:name "math.log/1 on complex, real part"
:precision binary64
(log (sqrt (+ (* re re) (* im im)))))