
(FPCore (re im) :precision binary64 (/ (log (sqrt (+ (* re re) (* im im)))) (log 10.0)))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im)))) / log(10.0);
}
! Base-10 log of |re + i*im|.  The hypot intrinsic (Fortran 2008) avoids
! the intermediate overflow/underflow of the naive sqrt(re*re + im*im)
! for |re|, |im| outside roughly [1e-154, 1e154].
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(hypot(re, im)) / log(10.0d0)
end function
/**
 * Base-10 logarithm of the magnitude of the complex number re + i*im.
 *
 * Math.hypot avoids the intermediate overflow/underflow of
 * re*re + im*im, so inputs with |re| or |im| above ~1e154 (where the
 * squared sum exceeds Double.MAX_VALUE) still yield a finite result.
 */
public static double code(double re, double im) {
    return Math.log(Math.hypot(re, im)) / Math.log(10.0);
}
def code(re, im):
    """Base-10 log of the magnitude of the complex number re + i*im.

    math.hypot computes sqrt(re*re + im*im) without intermediate
    overflow/underflow, so inputs with |re| or |im| above ~1e154 (where
    re*re alone overflows a binary64) are handled correctly.
    """
    return math.log(math.hypot(re, im)) / math.log(10.0)
# Base-10 log of |re + i*im|.  hypot avoids the intermediate
# overflow/underflow of the naive re*re + im*im, extending the usable
# input range to all finite Float64 values.
function code(re, im)
	return Float64(log(hypot(re, im)) / log(10.0))
end
% Base-10 log of |re + i*im|; hypot avoids the intermediate overflow of
% re*re + im*im for |re|, |im| above ~1e154.
function tmp = code(re, im)
	tmp = log(hypot(re, im)) / log(10.0);
end
(* Base-10 log of |re + i*im| at machine precision.  NOTE(review): the
   re*re + im*im intermediate can overflow/underflow for |re|, |im|
   outside roughly [1e-154, 1e154] in binary64-like precision. *)
code[re_, im_] := N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\log 10}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (re im) :precision binary64 (/ (log (sqrt (+ (* re re) (* im im)))) (log 10.0)))
/* Listing of Herbie's initial program: log10 of the complex magnitude.
 * NOTE(review): re*re + im*im overflows to +inf for |re|, |im| above
 * ~1e154; the hypot-based alternative below avoids this. */
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im)))) / log(10.0);
}
! Initial program listing: log10 of the complex magnitude.
! NOTE(review): re*re + im*im overflows for |re|, |im| > ~1e154;
! the hypot-based alternative below avoids this.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(sqrt(((re * re) + (im * im)))) / log(10.0d0)
end function
// Initial program listing: log10 of the complex magnitude.
// NOTE(review): re*re + im*im overflows for |re|, |im| > ~1e154;
// the Math.hypot-based alternative below avoids this.
public static double code(double re, double im) {
return Math.log(Math.sqrt(((re * re) + (im * im)))) / Math.log(10.0);
}
# Initial program listing: log10(|re + i*im|).  NOTE(review): re*re + im*im
# overflows for |re|, |im| > ~1e154; the math.hypot alternative below avoids this.
def code(re, im): return math.log(math.sqrt(((re * re) + (im * im)))) / math.log(10.0)
# Initial program listing: log10(|re + i*im|).  NOTE(review): re*re + im*im
# overflows for |re|, |im| > ~1e154; the hypot alternative below avoids this.
function code(re, im) return Float64(log(sqrt(Float64(Float64(re * re) + Float64(im * im)))) / log(10.0)) end
% Initial program listing: log10(|re + i*im|).  NOTE(review): re*re + im*im
% overflows for |re|, |im| > ~1e154; the hypot alternative below avoids this.
function tmp = code(re, im) tmp = log(sqrt(((re * re) + (im * im)))) / log(10.0); end
(* Initial program listing: log10(|re + i*im|).  NOTE(review): the
   re*re + im*im intermediate overflows at machine precision for large
   inputs; the Sqrt[re^2 + im^2] alternative below is hypot-based. *)
code[re_, im_] := N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\log 10}
\end{array}
(FPCore (re im) :precision binary64 (/ (log (hypot re im)) (- 0.0 (log 0.1))))
double code(double re, double im) {
return log(hypot(re, im)) / (0.0 - log(0.1));
}
/** log10 of the complex magnitude via Math.hypot (overflow-safe);
 *  the denominator 0.0 - Math.log(0.1) is mathematically log(10). */
public static double code(double re, double im) {
    double magnitude = Math.hypot(re, im);
    double lnTen = 0.0 - Math.log(0.1);
    return Math.log(magnitude) / lnTen;
}
def code(re, im):
    # log10 of the complex magnitude; hypot is overflow-safe.
    # The denominator 0.0 - log(0.1) is mathematically log(10).
    magnitude = math.hypot(re, im)
    ln_ten = 0.0 - math.log(0.1)
    return math.log(magnitude) / ln_ten
# log10 of the complex magnitude; hypot is overflow-safe.
# The denominator 0.0 - log(0.1) is mathematically log(10).
function code(re, im)
	magnitude = hypot(re, im)
	ln_ten = Float64(0.0 - log(0.1))
	return Float64(log(magnitude) / ln_ten)
end
% log10 of the complex magnitude; hypot is overflow-safe.
% The denominator 0.0 - log(0.1) is mathematically log(10).
function tmp = code(re, im)
	magnitude = hypot(re, im);
	ln_ten = 0.0 - log(0.1);
	tmp = log(magnitude) / ln_ten;
end
(* hypot-style alternative: Sqrt[re^2 + im^2] rendered directly; the
   denominator 0.0 - Log[0.1] is mathematically Log[10]. *)
code[re_, im_] := N[(N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision] / N[(0.0 - N[Log[0.1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\mathsf{hypot}\left(re, im\right)\right)}{0 - \log 0.1}
\end{array}
Initial program: 58.3%
accelerator-lowering-hypot.f64: 99.0%
Applied egg-rr: 99.0%
remove-double-neg: N/A
neg-lowering-neg.f64: N/A
neg-log: N/A
log-lowering-log.f64: N/A
metadata-eval: 99.1%
Applied egg-rr: 99.1%
Final simplification: 99.1%
(FPCore (re im) :precision binary64 (/ (log (hypot re im)) (log 10.0)))
/* Same as the previous alternative with the denominator 0.0 - log(0.1)
 * folded to log(10.0) (remove-double-neg / neg-log steps above). */
double code(double re, double im) {
return log(hypot(re, im)) / log(10.0);
}
// Same as the previous alternative with the denominator folded to
// Math.log(10.0) (remove-double-neg / neg-log steps above).
public static double code(double re, double im) {
return Math.log(Math.hypot(re, im)) / Math.log(10.0);
}
# Same as the previous alternative with the denominator folded to math.log(10.0).
def code(re, im): return math.log(math.hypot(re, im)) / math.log(10.0)
# Same as the previous alternative with the denominator folded to log(10.0).
function code(re, im) return Float64(log(hypot(re, im)) / log(10.0)) end
% Same as the previous alternative with the denominator folded to log(10.0).
function tmp = code(re, im) tmp = log(hypot(re, im)) / log(10.0); end
(* Same as the previous alternative with the denominator folded to Log[10.0]. *)
code[re_, im_] := N[(N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\mathsf{hypot}\left(re, im\right)\right)}{\log 10}
\end{array}
Initial program: 58.3%
accelerator-lowering-hypot.f64: 99.0%
Applied egg-rr: 99.0%
(FPCore (re im) :precision binary64 (/ (log (fma re (* re (/ 0.5 im)) im)) (- 0.0 (log 0.1))))
/* Taylor approximation of log10(|re + i*im|) in re around 0:
 * fma(re, re*(0.5/im), im) ~= im + re^2/(2*im), the first-order expansion
 * of sqrt(re^2 + im^2) when |re| << |im|.  NOTE(review): only accurate in
 * that regime (scored 27.0% below) and requires im > 0 for the log.
 * Kept as fma to preserve its single-rounding semantics. */
double code(double re, double im) {
return log(fma(re, (re * (0.5 / im)), im)) / (0.0 - log(0.1));
}
# Taylor approximation in re around 0: fma(re, re*(0.5/im), im) ~= im + re^2/(2*im).
# Accurate only when |re| << |im| (scored 27.0% below); requires im > 0.
function code(re, im) return Float64(log(fma(re, Float64(re * Float64(0.5 / im)), im)) / Float64(0.0 - log(0.1))) end
(* Taylor approximation in re around 0 (fma rendered as multiply-add);
   accurate only when |re| << |im|, per the 27.0% score below. *)
code[re_, im_] := N[(N[Log[N[(re * N[(re * N[(0.5 / im), $MachinePrecision]), $MachinePrecision] + im), $MachinePrecision]], $MachinePrecision] / N[(0.0 - N[Log[0.1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\mathsf{fma}\left(re, re \cdot \frac{0.5}{im}, im\right)\right)}{0 - \log 0.1}
\end{array}
Initial program: 58.3%
Taylor expanded in re around 0
+-commutative: N/A
*-lft-identity: N/A
associate-*l/: N/A
associate-*l*: N/A
unpow2: N/A
associate-*r*: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
*-commutative: N/A
*-lowering-*.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 27.0%
Simplified: 27.0%
remove-double-neg: N/A
neg-lowering-neg.f64: N/A
neg-log: N/A
log-lowering-log.f64: N/A
metadata-eval: 27.0%
Applied egg-rr: 27.0%
Final simplification: 27.0%
(FPCore (re im) :precision binary64 (/ (log (fma re (* re (/ 0.5 im)) im)) (log 10.0)))
/* Same Taylor/fma approximation as the previous alternative, with the
 * denominator folded to log(10.0).  Accurate only when |re| << |im|
 * (scored 27.0% below); requires im > 0 for the log. */
double code(double re, double im) {
return log(fma(re, (re * (0.5 / im)), im)) / log(10.0);
}
# Same Taylor/fma approximation with the denominator folded to log(10.0);
# accurate only when |re| << |im| (scored 27.0% below).
function code(re, im) return Float64(log(fma(re, Float64(re * Float64(0.5 / im)), im)) / log(10.0)) end
(* Same Taylor approximation with the denominator folded to Log[10.0];
   accurate only when |re| << |im|, per the 27.0% score below. *)
code[re_, im_] := N[(N[Log[N[(re * N[(re * N[(0.5 / im), $MachinePrecision]), $MachinePrecision] + im), $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\mathsf{fma}\left(re, re \cdot \frac{0.5}{im}, im\right)\right)}{\log 10}
\end{array}
Initial program: 58.3%
Taylor expanded in re around 0
+-commutative: N/A
*-lft-identity: N/A
associate-*l/: N/A
associate-*l*: N/A
unpow2: N/A
associate-*r*: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
*-commutative: N/A
*-lowering-*.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
/-lowering-/.f64: 27.0%
Simplified: 27.0%
(FPCore (re im) :precision binary64 (/ (log im) (- 0.0 (log 0.1))))
/* Degenerate approximation: re is dropped entirely (zeroth-order Taylor
 * expansion in re around 0).  Valid only when |re| << im and im > 0;
 * scored 27.4% below. */
double code(double re, double im) {
return log(im) / (0.0 - log(0.1));
}
! Degenerate approximation: re is dropped (zeroth-order Taylor expansion
! in re around 0).  Valid only when |re| << im and im > 0; scored 27.4%.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(im) / (0.0d0 - log(0.1d0))
end function
// Degenerate approximation: re is dropped (zeroth-order Taylor expansion
// in re around 0).  Valid only when |re| << im and im > 0; scored 27.4%.
public static double code(double re, double im) {
return Math.log(im) / (0.0 - Math.log(0.1));
}
# Degenerate approximation: re is dropped (zeroth-order Taylor expansion in
# re around 0).  Valid only when |re| << im and im > 0; scored 27.4% below.
def code(re, im): return math.log(im) / (0.0 - math.log(0.1))
# Degenerate approximation: re is dropped (zeroth-order Taylor expansion in
# re around 0).  Valid only when |re| << im and im > 0; scored 27.4% below.
function code(re, im) return Float64(log(im) / Float64(0.0 - log(0.1))) end
% Degenerate approximation: re is dropped (zeroth-order Taylor expansion in
% re around 0).  Valid only when |re| << im and im > 0; scored 27.4% below.
function tmp = code(re, im) tmp = log(im) / (0.0 - log(0.1)); end
(* Degenerate approximation: re is dropped (zeroth-order Taylor expansion
   in re around 0); valid only when |re| << im and im > 0. *)
code[re_, im_] := N[(N[Log[im], $MachinePrecision] / N[(0.0 - N[Log[0.1], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log im}{0 - \log 0.1}
\end{array}
Initial program: 58.3%
Taylor expanded in re around 0
/-lowering-/.f64: N/A
log-lowering-log.f64: N/A
log-lowering-log.f64: 27.4%
Simplified: 27.4%
remove-double-neg: N/A
neg-lowering-neg.f64: N/A
neg-log: N/A
log-lowering-log.f64: N/A
metadata-eval: 27.4%
Applied egg-rr: 27.4%
Final simplification: 27.4%
(FPCore (re im) :precision binary64 (/ (log im) (log 10.0)))
/* Same degenerate approximation (re dropped) with the denominator folded
 * to log(10.0).  Valid only when |re| << im and im > 0; scored 27.4%. */
double code(double re, double im) {
return log(im) / log(10.0);
}
! Same degenerate approximation (re dropped) with the denominator folded
! to log(10.0d0).  Valid only when |re| << im and im > 0; scored 27.4%.
real(8) function code(re, im)
real(8), intent (in) :: re
real(8), intent (in) :: im
code = log(im) / log(10.0d0)
end function
// Same degenerate approximation (re dropped) with the denominator folded
// to Math.log(10.0).  Valid only when |re| << im and im > 0; scored 27.4%.
public static double code(double re, double im) {
return Math.log(im) / Math.log(10.0);
}
# Same degenerate approximation (re dropped) with the denominator folded to
# math.log(10.0).  Valid only when |re| << im and im > 0; scored 27.4% below.
def code(re, im): return math.log(im) / math.log(10.0)
# Same degenerate approximation (re dropped) with the denominator folded to
# log(10.0).  Valid only when |re| << im and im > 0; scored 27.4% below.
function code(re, im) return Float64(log(im) / log(10.0)) end
% Same degenerate approximation (re dropped) with the denominator folded to
% log(10.0).  Valid only when |re| << im and im > 0; scored 27.4% below.
function tmp = code(re, im) tmp = log(im) / log(10.0); end
(* Same degenerate approximation (re dropped) with the denominator folded
   to Log[10.0]; valid only when |re| << im and im > 0. *)
code[re_, im_] := N[(N[Log[im], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log im}{\log 10}
\end{array}
Initial program: 58.3%
Taylor expanded in re around 0
/-lowering-/.f64: N/A
log-lowering-log.f64: N/A
log-lowering-log.f64: 27.4%
Simplified: 27.4%
herbie shell --seed 2024196
(FPCore (re im)
:name "math.log10 on complex, real part"
:precision binary64
(/ (log (sqrt (+ (* re re) (* im im)))) (log 10.0)))