
(FPCore (re im) :precision binary64 (/ (log (sqrt (+ (* re re) (* im im)))) (log 10.0)))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im)))) / log(10.0);
}
real(8) function code(re, im)
    ! Base-10 log of the complex magnitude sqrt(re^2 + im^2), via change of base.
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: mag2
    mag2 = (re * re) + (im * im)
    code = log(sqrt(mag2)) / log(10.0d0)
end function
public static double code(double re, double im) {
    // Base-10 log of the complex magnitude sqrt(re^2 + im^2), via change of base.
    double mag2 = (re * re) + (im * im);
    return Math.log(Math.sqrt(mag2)) / Math.log(10.0);
}
def code(re, im):
    # Base-10 log of the complex magnitude sqrt(re^2 + im^2), via change of base.
    mag2 = (re * re) + (im * im)
    return math.log(math.sqrt(mag2)) / math.log(10.0)
function code(re, im)
    # Base-10 log of the complex magnitude sqrt(re^2 + im^2), via change of base.
    mag2 = Float64(Float64(re * re) + Float64(im * im))
    return Float64(log(sqrt(mag2)) / log(10.0))
end
function tmp = code(re, im)
    % Base-10 log of the complex magnitude sqrt(re^2 + im^2), via change of base.
    mag2 = (re * re) + (im * im);
    tmp = log(sqrt(mag2)) / log(10.0);
end
(* Base-10 log of the complex magnitude; each N[..., $MachinePrecision] wrapper models one rounding step of the binary64 FPCore original — do not reassociate or simplify. *)
code[re_, im_] := N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\log 10}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (re im) :precision binary64 (/ (log (sqrt (+ (* re re) (* im im)))) (log 10.0)))
double code(double re, double im) {
return log(sqrt(((re * re) + (im * im)))) / log(10.0);
}
real(8) function code(re, im)
    ! Base-10 logarithm of |re + i*im|, computed with a natural-log change of base.
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: sumsq
    sumsq = (re * re) + (im * im)
    code = log(sqrt(sumsq)) / log(10.0d0)
end function
public static double code(double re, double im) {
    // Base-10 logarithm of |re + i*im|, computed with a natural-log change of base.
    double magnitude = Math.sqrt((re * re) + (im * im));
    return Math.log(magnitude) / Math.log(10.0);
}
def code(re, im):
    # Base-10 logarithm of |re + i*im|, computed with a natural-log change of base.
    magnitude = math.sqrt((re * re) + (im * im))
    return math.log(magnitude) / math.log(10.0)
function code(re, im)
    # Base-10 logarithm of |re + i*im|, computed with a natural-log change of base.
    sumsq = Float64(Float64(re * re) + Float64(im * im))
    return Float64(log(sqrt(sumsq)) / log(10.0))
end
function tmp = code(re, im)
    % Base-10 logarithm of |re + i*im|, computed with a natural-log change of base.
    sumsq = (re * re) + (im * im);
    tmp = log(sqrt(sumsq)) / log(10.0);
end
(* Base-10 log of the complex magnitude; the N[..., $MachinePrecision] wrappers model the per-step binary64 rounding of the FPCore original — keep the nesting as generated. *)
code[re_, im_] := N[(N[Log[N[Sqrt[N[(N[(re * re), $MachinePrecision] + N[(im * im), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\sqrt{re \cdot re + im \cdot im}\right)}{\log 10}
\end{array}
(FPCore (re im) :precision binary64 (/ -1.0 (/ (log 0.1) (log (hypot im re)))))
double code(double re, double im) {
return -1.0 / (log(0.1) / log(hypot(im, re)));
}
public static double code(double re, double im) {
    // Equivalent to log10(hypot(re, im)); hypot avoids overflow/underflow in re*re + im*im.
    double ratio = Math.log(0.1) / Math.log(Math.hypot(im, re));
    return -1.0 / ratio;
}
def code(re, im):
    # Equivalent to log10(hypot(re, im)); hypot avoids overflow/underflow in re*re + im*im.
    ratio = math.log(0.1) / math.log(math.hypot(im, re))
    return -1.0 / ratio
function code(re, im)
    # Equivalent to log10(hypot(re, im)); hypot avoids overflow/underflow in re*re + im*im.
    ratio = Float64(log(0.1) / log(hypot(im, re)))
    return Float64(-1.0 / ratio)
end
function tmp = code(re, im)
    % Equivalent to log10(hypot(re, im)); hypot avoids overflow/underflow in re*re + im*im.
    ratio = log(0.1) / log(hypot(im, re));
    tmp = -1.0 / ratio;
end
(* Equivalent to log10 of the complex magnitude, written as -1 / (log(0.1) / log(sqrt(im^2 + re^2))); N[..., $MachinePrecision] wrappers model binary64 rounding per step. *)
code[re_, im_] := N[(-1.0 / N[(N[Log[0.1], $MachinePrecision] / N[Log[N[Sqrt[im ^ 2 + re ^ 2], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{\log 0.1}{\log \left(\mathsf{hypot}\left(im, re\right)\right)}}
\end{array}
Initial program 53.5%
lift-/.f64N/A
clear-numN/A
frac-2negN/A
metadata-evalN/A
lower-/.f64N/A
distribute-neg-fracN/A
lower-/.f64N/A
lift-log.f64N/A
neg-logN/A
lower-log.f64N/A
metadata-eval53.4
lift-sqrt.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
lower-hypot.f6499.1
Applied rewrites99.1%
(FPCore (re im) :precision binary64 (/ (log (hypot re im)) (log 10.0)))
double code(double re, double im) {
return log(hypot(re, im)) / log(10.0);
}
public static double code(double re, double im) {
    // log10 of the complex magnitude; hypot avoids overflow/underflow in re*re + im*im.
    double magnitude = Math.hypot(re, im);
    return Math.log(magnitude) / Math.log(10.0);
}
def code(re, im):
    # log10 of the complex magnitude; hypot avoids overflow/underflow in re*re + im*im.
    magnitude = math.hypot(re, im)
    return math.log(magnitude) / math.log(10.0)
function code(re, im)
    # log10 of the complex magnitude; hypot avoids overflow/underflow in re*re + im*im.
    magnitude = hypot(re, im)
    return Float64(log(magnitude) / log(10.0))
end
function tmp = code(re, im)
    % log10 of the complex magnitude; hypot avoids overflow/underflow in re*re + im*im.
    magnitude = hypot(re, im);
    tmp = log(magnitude) / log(10.0);
end
(* log10 of the complex magnitude via Sqrt[re^2 + im^2] (hypot rendered as a square root); N[..., $MachinePrecision] wrappers model binary64 rounding per step. *)
code[re_, im_] := N[(N[Log[N[Sqrt[re ^ 2 + im ^ 2], $MachinePrecision]], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log \left(\mathsf{hypot}\left(re, im\right)\right)}{\log 10}
\end{array}
Initial program 53.5%
lift-sqrt.f64N/A
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lower-hypot.f6499.0
Applied rewrites99.0%
(FPCore (re im) :precision binary64 (/ (* -0.5 (fma (/ re im) (/ re im) (* (log im) 2.0))) (log 0.1)))
double code(double re, double im) {
return (-0.5 * fma((re / im), (re / im), (log(im) * 2.0))) / log(0.1);
}
function code(re, im)
    # Series form for |im| >> |re|: fma fuses (re/im)^2 + 2*log(im) into one rounding.
    q = Float64(re / im)
    scaled = Float64(-0.5 * fma(q, q, Float64(log(im) * 2.0)))
    return Float64(scaled / log(0.1))
end
(* Series form for |im| >> |re|: -0.5*((re/im)^2 + 2*log(im)) / log(0.1); the fma of the FPCore original is rendered as multiply-add inside one N[...] rounding. *)
code[re_, im_] := N[(N[(-0.5 * N[(N[(re / im), $MachinePrecision] * N[(re / im), $MachinePrecision] + N[(N[Log[im], $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Log[0.1], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.5 \cdot \mathsf{fma}\left(\frac{re}{im}, \frac{re}{im}, \log im \cdot 2\right)}{\log 0.1}
\end{array}
Initial program 53.5%
lift-/.f64N/A
frac-2negN/A
lower-/.f64N/A
lower-neg.f64N/A
lift-sqrt.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
lower-hypot.f64N/A
lift-log.f64N/A
neg-logN/A
lower-log.f64N/A
metadata-eval99.0
Applied rewrites99.0%
lift-neg.f64N/A
lift-log.f64N/A
lift-hypot.f64N/A
pow1/2N/A
lift-*.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-+.f64N/A
log-powN/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-*.f64N/A
lower-log.f6453.4
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6453.4
Applied rewrites53.4%
Taylor expanded in im around inf
+-commutativeN/A
unpow2N/A
unpow2N/A
times-fracN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
log-recN/A
lower-neg.f64N/A
lower-log.f6426.3
Applied rewrites26.3%
Taylor expanded in im around inf
+-commutativeN/A
unpow2N/A
unpow2N/A
times-fracN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f64N/A
log-recN/A
mul-1-negN/A
associate-*r*N/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
lower-log.f6426.3
Applied rewrites26.3%
(FPCore (re im) :precision binary64 (/ -1.0 (/ (log 0.1) (log im))))
double code(double re, double im) {
return -1.0 / (log(0.1) / log(im));
}
real(8) function code(re, im)
    ! Limit for |re| << |im|: log10(im), written as -1 / (log(0.1) / log(im)).
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: ratio
    ratio = log(0.1d0) / log(im)
    code = (-1.0d0) / ratio
end function
public static double code(double re, double im) {
    // Limit for |re| << |im|: log10(im), written as -1 / (log(0.1) / log(im)).
    double ratio = Math.log(0.1) / Math.log(im);
    return -1.0 / ratio;
}
def code(re, im):
    # Limit for |re| << |im|: log10(im), written as -1 / (log(0.1) / log(im)).
    ratio = math.log(0.1) / math.log(im)
    return -1.0 / ratio
function code(re, im)
    # Limit for |re| << |im|: log10(im), written as -1 / (log(0.1) / log(im)).
    ratio = Float64(log(0.1) / log(im))
    return Float64(-1.0 / ratio)
end
function tmp = code(re, im)
    % Limit for |re| << |im|: log10(im), written as -1 / (log(0.1) / log(im)).
    ratio = log(0.1) / log(im);
    tmp = -1.0 / ratio;
end
(* Limit for |re| << |im|: log10(im), written as -1 / (log(0.1) / log(im)); N[..., $MachinePrecision] wrappers model binary64 rounding per step. *)
code[re_, im_] := N[(-1.0 / N[(N[Log[0.1], $MachinePrecision] / N[Log[im], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{\log 0.1}{\log im}}
\end{array}
Initial program 53.5%
Taylor expanded in re around 0
lower-log.f6428.3
Applied rewrites28.3%
lift-/.f64N/A
clear-numN/A
frac-2negN/A
metadata-evalN/A
distribute-frac-negN/A
lift-log.f64N/A
neg-logN/A
metadata-evalN/A
lift-log.f64N/A
lower-/.f64N/A
lower-/.f6428.3
Applied rewrites28.3%
(FPCore (re im) :precision binary64 (/ (log im) (log 10.0)))
double code(double re, double im) {
return log(im) / log(10.0);
}
real(8) function code(re, im)
    ! Limit for |re| << |im|: log10(im) via natural-log change of base; re is unused.
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: numerator
    numerator = log(im)
    code = numerator / log(10.0d0)
end function
public static double code(double re, double im) {
    // Limit for |re| << |im|: log10(im) via natural-log change of base; re is unused.
    double numerator = Math.log(im);
    return numerator / Math.log(10.0);
}
def code(re, im):
    # Limit for |re| << |im|: log10(im) via natural-log change of base; re is unused.
    numerator = math.log(im)
    return numerator / math.log(10.0)
function code(re, im)
    # Limit for |re| << |im|: log10(im) via natural-log change of base; re is unused.
    numerator = log(im)
    return Float64(numerator / log(10.0))
end
function tmp = code(re, im)
    % Limit for |re| << |im|: log10(im) via natural-log change of base; re is unused.
    numerator = log(im);
    tmp = numerator / log(10.0);
end
(* Limit for |re| << |im|: log10(im) via natural-log change of base; N[..., $MachinePrecision] wrappers model binary64 rounding per step. *)
code[re_, im_] := N[(N[Log[im], $MachinePrecision] / N[Log[10.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\log im}{\log 10}
\end{array}
Initial program 53.5%
Taylor expanded in re around 0
lower-log.f6428.3
Applied rewrites28.3%
(FPCore (re im) :precision binary64 (/ (* -0.5 (* (/ re im) (/ re im))) (log 0.1)))
double code(double re, double im) {
    /* Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1). */
    double q = re / im;
    return (-0.5 * (q * q)) / log(0.1);
}
real(8) function code(re, im)
    ! Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1).
    real(8), intent (in) :: re
    real(8), intent (in) :: im
    real(8) :: q
    q = re / im
    code = ((-0.5d0) * (q * q)) / log(0.1d0)
end function
public static double code(double re, double im) {
    // Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1).
    double q = re / im;
    return (-0.5 * (q * q)) / Math.log(0.1);
}
def code(re, im):
    # Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1).
    q = re / im
    return (-0.5 * (q * q)) / math.log(0.1)
function code(re, im)
    # Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1).
    q = Float64(re / im)
    scaled = Float64(-0.5 * Float64(q * q))
    return Float64(scaled / log(0.1))
end
function tmp = code(re, im)
    % Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1).
    q = re / im;
    tmp = (-0.5 * (q * q)) / log(0.1);
end
(* Series term for |im| >> |re|: 0.5*(re/im)^2 / log(10), written via log(0.1); N[..., $MachinePrecision] wrappers model binary64 rounding per step. *)
code[re_, im_] := N[(N[(-0.5 * N[(N[(re / im), $MachinePrecision] * N[(re / im), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Log[0.1], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.5 \cdot \left(\frac{re}{im} \cdot \frac{re}{im}\right)}{\log 0.1}
\end{array}
Initial program 53.5%
lift-/.f64N/A
frac-2negN/A
lower-/.f64N/A
lower-neg.f64N/A
lift-sqrt.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
lower-hypot.f64N/A
lift-log.f64N/A
neg-logN/A
lower-log.f64N/A
metadata-eval99.0
Applied rewrites99.0%
lift-neg.f64N/A
lift-log.f64N/A
lift-hypot.f64N/A
pow1/2N/A
lift-*.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-+.f64N/A
log-powN/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-*.f64N/A
lower-log.f6453.4
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6453.4
Applied rewrites53.4%
Taylor expanded in im around inf
+-commutativeN/A
unpow2N/A
unpow2N/A
times-fracN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
log-recN/A
lower-neg.f64N/A
lower-log.f6426.3
Applied rewrites26.3%
Taylor expanded in re around inf
Applied rewrites3.4%
herbie shell --seed 2024308
(FPCore (re im)
:name "math.log10 on complex, real part"
:precision binary64
(/ (log (sqrt (+ (* re re) (* im im)))) (log 10.0)))