
; Original program: log(N + 1) - log(N) in binary64.
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
! Computes log(n + 1) - log(n) in double precision (direct form).
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
    // log(N + 1) - log(N), evaluated directly.
    double logUpper = Math.log(N + 1.0);
    double logLower = Math.log(N);
    return logUpper - logLower;
}
def code(N):
    """log(N + 1) - log(N), evaluated directly."""
    upper = math.log(N + 1.0)
    lower = math.log(N)
    return upper - lower
function code(N)
    # log(N + 1) - log(N), evaluated directly.
    hi = log(Float64(N + 1.0))
    lo = log(N)
    return Float64(hi - lo)
end
function tmp = code(N)
  % log(N + 1) - log(N), evaluated directly.
  upper = log(N + 1.0);
  lower = log(N);
  tmp = upper - lower;
end
(* log(N + 1) - log(N) at $MachinePrecision; note the symbol N serves both as the pattern argument and as Mathematica's numeric-evaluation function N[...]. *)
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (log (+ N 1.0)) (log N)))
double code(double N) {
return log((N + 1.0)) - log(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((n + 1.0d0)) - log(n)
end function
public static double code(double N) {
return Math.log((N + 1.0)) - Math.log(N);
}
def code(N): return math.log((N + 1.0)) - math.log(N)
function code(N) return Float64(log(Float64(N + 1.0)) - log(N)) end
function tmp = code(N) tmp = log((N + 1.0)) - log(N); end
code[N_] := N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\log \left(N + 1\right) - \log N
\end{array}
(FPCore (N)
:precision binary64
(if (<= (- (log (+ N 1.0)) (log N)) 0.0006)
(/
-1.0
(/
N
(- -1.0 (/ (fma N (fma N -0.5 0.3333333333333333) -0.25) (* N (* N N))))))
(- (log (/ N (+ N 1.0))))))
double code(double N) {
double tmp;
if ((log((N + 1.0)) - log(N)) <= 0.0006) {
tmp = -1.0 / (N / (-1.0 - (fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / (N * (N * N)))));
} else {
tmp = -log((N / (N + 1.0)));
}
return tmp;
}
function code(N)
    # log(N + 1) - log(N): series-based rational form when the directly
    # computed difference is at most 0.0006, single-log form otherwise.
    # (Reformatted: the original had the statements run together on one line
    # without separators, which is invalid Julia syntax.)
    tmp = 0.0
    if (Float64(log(Float64(N + 1.0)) - log(N)) <= 0.0006)
        tmp = Float64(-1.0 / Float64(N / Float64(-1.0 - Float64(fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / Float64(N * Float64(N * N))))))
    else
        tmp = Float64(-log(Float64(N / Float64(N + 1.0))))
    end
    return tmp
end
code[N_] := If[LessEqual[N[(N[Log[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[Log[N], $MachinePrecision]), $MachinePrecision], 0.0006], N[(-1.0 / N[(N / N[(-1.0 - N[(N[(N * N[(N * -0.5 + 0.3333333333333333), $MachinePrecision] + -0.25), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[Log[N[(N / N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\log \left(N + 1\right) - \log N \leq 0.0006:\\
\;\;\;\;\frac{-1}{\frac{N}{-1 - \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.3333333333333333\right), -0.25\right)}{N \cdot \left(N \cdot N\right)}}}\\
\mathbf{else}:\\
\;\;\;\;-\log \left(\frac{N}{N + 1}\right)\\
\end{array}
\end{array}
if (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) < 5.99999999999999947e-4Initial program 16.5%
Taylor expanded in N around inf
Simplified99.8%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6499.8
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.8
Applied egg-rr99.8%
Taylor expanded in N around 0
lower-/.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.8
Simplified99.8%
if 5.99999999999999947e-4 < (-.f64 (log.f64 (+.f64 N #s(literal 1 binary64))) (log.f64 N)) Initial program 91.1%
lift-+.f64N/A
diff-logN/A
clear-numN/A
log-recN/A
diff-logN/A
lift-log.f64N/A
lift-log.f64N/A
lower-neg.f64N/A
lift-log.f64N/A
lift-log.f64N/A
diff-logN/A
lower-log.f64N/A
lower-/.f6494.1
Applied egg-rr94.1%
Final simplification99.4%
(FPCore (N)
:precision binary64
(if (<= N 1350.0)
(log (/ (+ N 1.0) N))
(/
-1.0
(/
N
(-
-1.0
(/ (fma N (fma N -0.5 0.3333333333333333) -0.25) (* N (* N N))))))))
double code(double N) {
double tmp;
if (N <= 1350.0) {
tmp = log(((N + 1.0) / N));
} else {
tmp = -1.0 / (N / (-1.0 - (fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / (N * (N * N)))));
}
return tmp;
}
function code(N)
    # log(N + 1) - log(N): single log of a ratio for N <= 1350,
    # series-based rational form for larger N.
    # (Reformatted: the original had the statements run together on one line
    # without separators, which is invalid Julia syntax.)
    tmp = 0.0
    if (N <= 1350.0)
        tmp = log(Float64(Float64(N + 1.0) / N))
    else
        tmp = Float64(-1.0 / Float64(N / Float64(-1.0 - Float64(fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / Float64(N * Float64(N * N))))))
    end
    return tmp
end
code[N_] := If[LessEqual[N, 1350.0], N[Log[N[(N[(N + 1.0), $MachinePrecision] / N), $MachinePrecision]], $MachinePrecision], N[(-1.0 / N[(N / N[(-1.0 - N[(N[(N * N[(N * -0.5 + 0.3333333333333333), $MachinePrecision] + -0.25), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq 1350:\\
\;\;\;\;\log \left(\frac{N + 1}{N}\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{-1}{\frac{N}{-1 - \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.3333333333333333\right), -0.25\right)}{N \cdot \left(N \cdot N\right)}}}\\
\end{array}
\end{array}
if N < 1350Initial program 91.5%
lift-+.f64N/A
diff-logN/A
lower-log.f64N/A
lower-/.f6493.8
Applied egg-rr93.8%
if 1350 < N Initial program 16.8%
Taylor expanded in N around inf
Simplified99.8%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6499.8
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.8
Applied egg-rr99.8%
Taylor expanded in N around 0
lower-/.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.8
Simplified99.8%
Final simplification99.3%
(FPCore (N)
:precision binary64
(let* ((t_0 (+ -0.5 (/ 0.3333333333333333 N))))
(/
(- 1.0 (/ (* t_0 t_0) (* N N)))
(* N (- 1.0 (/ (+ -0.5 (/ (+ 0.3333333333333333 (/ -0.25 N)) N)) N))))))
double code(double N) {
    /* Truncated-series rational approximation of log(N + 1) - log(N). */
    double lead = -0.5 + (0.3333333333333333 / N);
    double numer = 1.0 - ((lead * lead) / (N * N));
    double inner = -0.5 + ((0.3333333333333333 + (-0.25 / N)) / N);
    double denom = N * (1.0 - (inner / N));
    return numer / denom;
}
real(8) function code(n)
real(8), intent (in) :: n
real(8) :: t_0
t_0 = (-0.5d0) + (0.3333333333333333d0 / n)
code = (1.0d0 - ((t_0 * t_0) / (n * n))) / (n * (1.0d0 - (((-0.5d0) + ((0.3333333333333333d0 + ((-0.25d0) / n)) / n)) / n)))
end function
public static double code(double N) {
double t_0 = -0.5 + (0.3333333333333333 / N);
return (1.0 - ((t_0 * t_0) / (N * N))) / (N * (1.0 - ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)));
}
def code(N):
    """Truncated-series rational approximation of log(N + 1) - log(N).

    The original squeezed the assignment and the return onto one suite line
    with no separator, which is a Python SyntaxError; this is the same
    expression split into valid statements.
    """
    t_0 = -0.5 + (0.3333333333333333 / N)
    return (1.0 - ((t_0 * t_0) / (N * N))) / (N * (1.0 - ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)))
function code(N) t_0 = Float64(-0.5 + Float64(0.3333333333333333 / N)) return Float64(Float64(1.0 - Float64(Float64(t_0 * t_0) / Float64(N * N))) / Float64(N * Float64(1.0 - Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 + Float64(-0.25 / N)) / N)) / N)))) end
function tmp = code(N) t_0 = -0.5 + (0.3333333333333333 / N); tmp = (1.0 - ((t_0 * t_0) / (N * N))) / (N * (1.0 - ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N))); end
code[N_] := Block[{t$95$0 = N[(-0.5 + N[(0.3333333333333333 / N), $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - N[(N[(t$95$0 * t$95$0), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N * N[(1.0 - N[(N[(-0.5 + N[(N[(0.3333333333333333 + N[(-0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -0.5 + \frac{0.3333333333333333}{N}\\
\frac{1 - \frac{t\_0 \cdot t\_0}{N \cdot N}}{N \cdot \left(1 - \frac{-0.5 + \frac{0.3333333333333333 + \frac{-0.25}{N}}{N}}{N}\right)}
\end{array}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied egg-rr96.2%
Taylor expanded in N around inf
lower-/.f6496.4
Simplified96.4%
Taylor expanded in N around inf
lower-/.f6496.5
Simplified96.5%
Final simplification96.5%
(FPCore (N) :precision binary64 (/ (+ (/ (- (/ (+ 0.3333333333333333 (/ -0.2361111111111111 N)) N) 0.25) (* N N)) 1.0) (* N (- 1.0 (/ (+ -0.5 (/ (+ 0.3333333333333333 (/ -0.25 N)) N)) N)))))
double code(double N) {
    /* Higher-order truncated-series rational approximation of
       log(N + 1) - log(N). */
    double top_inner = (0.3333333333333333 + (-0.2361111111111111 / N)) / N;
    double numer = ((top_inner - 0.25) / (N * N)) + 1.0;
    double bot_inner = (0.3333333333333333 + (-0.25 / N)) / N;
    double denom = N * (1.0 - ((-0.5 + bot_inner) / N));
    return numer / denom;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (((((0.3333333333333333d0 + ((-0.2361111111111111d0) / n)) / n) - 0.25d0) / (n * n)) + 1.0d0) / (n * (1.0d0 - (((-0.5d0) + ((0.3333333333333333d0 + ((-0.25d0) / n)) / n)) / n)))
end function
public static double code(double N) {
return (((((0.3333333333333333 + (-0.2361111111111111 / N)) / N) - 0.25) / (N * N)) + 1.0) / (N * (1.0 - ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)));
}
def code(N): return (((((0.3333333333333333 + (-0.2361111111111111 / N)) / N) - 0.25) / (N * N)) + 1.0) / (N * (1.0 - ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N)))
function code(N) return Float64(Float64(Float64(Float64(Float64(Float64(0.3333333333333333 + Float64(-0.2361111111111111 / N)) / N) - 0.25) / Float64(N * N)) + 1.0) / Float64(N * Float64(1.0 - Float64(Float64(-0.5 + Float64(Float64(0.3333333333333333 + Float64(-0.25 / N)) / N)) / N)))) end
function tmp = code(N) tmp = (((((0.3333333333333333 + (-0.2361111111111111 / N)) / N) - 0.25) / (N * N)) + 1.0) / (N * (1.0 - ((-0.5 + ((0.3333333333333333 + (-0.25 / N)) / N)) / N))); end
code[N_] := N[(N[(N[(N[(N[(N[(0.3333333333333333 + N[(-0.2361111111111111 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] - 0.25), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N[(N * N[(1.0 - N[(N[(-0.5 + N[(N[(0.3333333333333333 + N[(-0.25 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\frac{0.3333333333333333 + \frac{-0.2361111111111111}{N}}{N} - 0.25}{N \cdot N} + 1}{N \cdot \left(1 - \frac{-0.5 + \frac{0.3333333333333333 + \frac{-0.25}{N}}{N}}{N}\right)}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied egg-rr96.2%
Taylor expanded in N around inf
lower-/.f6496.4
Simplified96.4%
Taylor expanded in N around -inf
lower-/.f64N/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-/.f64N/A
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
lower-/.f64N/A
metadata-evalN/A
unpow2N/A
lower-*.f6496.4
Simplified96.4%
Final simplification96.4%
(FPCore (N) :precision binary64 (/ -1.0 (/ N (- -1.0 (/ (fma N (fma N -0.5 0.3333333333333333) -0.25) (* N (* N N)))))))
double code(double N) {
return -1.0 / (N / (-1.0 - (fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / (N * (N * N)))));
}
function code(N) return Float64(-1.0 / Float64(N / Float64(-1.0 - Float64(fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / Float64(N * Float64(N * N)))))) end
code[N_] := N[(-1.0 / N[(N / N[(-1.0 - N[(N[(N * N[(N * -0.5 + 0.3333333333333333), $MachinePrecision] + -0.25), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{N}{-1 - \frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.3333333333333333\right), -0.25\right)}{N \cdot \left(N \cdot N\right)}}}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6496.3
lift-+.f64N/A
+-commutativeN/A
lower-+.f6496.3
Applied egg-rr96.3%
Taylor expanded in N around 0
lower-/.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6496.3
Simplified96.3%
Final simplification96.3%
(FPCore (N) :precision binary64 (/ (+ (/ (fma N (fma N -0.5 0.3333333333333333) -0.25) (* N (* N N))) 1.0) N))
double code(double N) {
return ((fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / (N * (N * N))) + 1.0) / N;
}
function code(N) return Float64(Float64(Float64(fma(N, fma(N, -0.5, 0.3333333333333333), -0.25) / Float64(N * Float64(N * N))) + 1.0) / N) end
code[N_] := N[(N[(N[(N[(N * N[(N * -0.5 + 0.3333333333333333), $MachinePrecision] + -0.25), $MachinePrecision] / N[(N * N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\mathsf{fma}\left(N, \mathsf{fma}\left(N, -0.5, 0.3333333333333333\right), -0.25\right)}{N \cdot \left(N \cdot N\right)} + 1}{N}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
Taylor expanded in N around 0
lower-/.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6496.2
Simplified96.2%
Final simplification96.2%
(FPCore (N) :precision binary64 (/ (+ (/ (+ -0.5 (/ 0.3333333333333333 N)) N) 1.0) N))
double code(double N) {
    /* Three-term nested series: ((-1/2 + 1/(3N)) / N + 1) / N. */
    double inner = -0.5 + (0.3333333333333333 / N);
    return ((inner / N) + 1.0) / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = ((((-0.5d0) + (0.3333333333333333d0 / n)) / n) + 1.0d0) / n
end function
public static double code(double N) {
return (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N;
}
def code(N): return (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N
function code(N) return Float64(Float64(Float64(Float64(-0.5 + Float64(0.3333333333333333 / N)) / N) + 1.0) / N) end
function tmp = code(N) tmp = (((-0.5 + (0.3333333333333333 / N)) / N) + 1.0) / N; end
code[N_] := N[(N[(N[(N[(-0.5 + N[(0.3333333333333333 / N), $MachinePrecision]), $MachinePrecision] / N), $MachinePrecision] + 1.0), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-0.5 + \frac{0.3333333333333333}{N}}{N} + 1}{N}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
lower-/.f64N/A
Simplified95.0%
Final simplification95.0%
(FPCore (N) :precision binary64 (/ -1.0 (/ N (- -1.0 (/ -0.5 N)))))
double code(double N) {
    /* Two-term series written as a nested quotient. */
    double denom = -1.0 - (-0.5 / N);
    return -1.0 / (N / denom);
}
real(8) function code(n)
real(8), intent (in) :: n
code = (-1.0d0) / (n / ((-1.0d0) - ((-0.5d0) / n)))
end function
public static double code(double N) {
return -1.0 / (N / (-1.0 - (-0.5 / N)));
}
def code(N): return -1.0 / (N / (-1.0 - (-0.5 / N)))
function code(N) return Float64(-1.0 / Float64(N / Float64(-1.0 - Float64(-0.5 / N)))) end
function tmp = code(N) tmp = -1.0 / (N / (-1.0 - (-0.5 / N))); end
code[N_] := N[(-1.0 / N[(N / N[(-1.0 - N[(-0.5 / N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\frac{N}{-1 - \frac{-0.5}{N}}}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
Taylor expanded in N around -inf
associate-*r/N/A
lower-/.f64N/A
sub-negN/A
metadata-evalN/A
distribute-lft-inN/A
neg-mul-1N/A
metadata-evalN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f6492.4
Simplified92.4%
lift-/.f64N/A
lift-+.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6492.4
lift-+.f64N/A
+-commutativeN/A
lower-+.f6492.4
Applied egg-rr92.4%
Final simplification92.4%
(FPCore (N) :precision binary64 (/ (- 1.0 (/ 0.25 (* N N))) (+ N 0.5)))
double code(double N) {
    /* (1 - 0.25 / N^2) / (N + 0.5). */
    double numer = 1.0 - (0.25 / (N * N));
    return numer / (N + 0.5);
}
real(8) function code(n)
real(8), intent (in) :: n
code = (1.0d0 - (0.25d0 / (n * n))) / (n + 0.5d0)
end function
public static double code(double N) {
return (1.0 - (0.25 / (N * N))) / (N + 0.5);
}
def code(N): return (1.0 - (0.25 / (N * N))) / (N + 0.5)
function code(N) return Float64(Float64(1.0 - Float64(0.25 / Float64(N * N))) / Float64(N + 0.5)) end
function tmp = code(N) tmp = (1.0 - (0.25 / (N * N))) / (N + 0.5); end
code[N_] := N[(N[(1.0 - N[(0.25 / N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N + 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \frac{0.25}{N \cdot N}}{N + 0.5}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied egg-rr96.2%
Taylor expanded in N around inf
lower-/.f64N/A
unpow2N/A
lower-*.f6494.9
Simplified94.9%
Taylor expanded in N around inf
distribute-rgt-inN/A
*-lft-identityN/A
associate-*l*N/A
lft-mult-inverseN/A
metadata-evalN/A
lower-+.f6492.4
Simplified92.4%
(FPCore (N) :precision binary64 (/ (+ (/ -0.5 N) 1.0) N))
double code(double N) {
    /* Two-term nested series: (-0.5/N + 1) / N. */
    double bracket = (-0.5 / N) + 1.0;
    return bracket / N;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (((-0.5d0) / n) + 1.0d0) / n
end function
public static double code(double N) {
return ((-0.5 / N) + 1.0) / N;
}
def code(N): return ((-0.5 / N) + 1.0) / N
function code(N) return Float64(Float64(Float64(-0.5 / N) + 1.0) / N) end
function tmp = code(N) tmp = ((-0.5 / N) + 1.0) / N; end
code[N_] := N[(N[(N[(-0.5 / N), $MachinePrecision] + 1.0), $MachinePrecision] / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-0.5}{N} + 1}{N}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
lower-/.f64N/A
sub-negN/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
distribute-neg-fracN/A
metadata-evalN/A
lower-/.f6492.4
Simplified92.4%
Final simplification92.4%
(FPCore (N) :precision binary64 (/ (+ N -0.5) (* N N)))
double code(double N) {
    /* Two-term series as a single quotient: (N - 0.5) / N^2. */
    double numer = N + -0.5;
    double denom = N * N;
    return numer / denom;
}
real(8) function code(n)
real(8), intent (in) :: n
code = (n + (-0.5d0)) / (n * n)
end function
public static double code(double N) {
return (N + -0.5) / (N * N);
}
def code(N): return (N + -0.5) / (N * N)
function code(N) return Float64(Float64(N + -0.5) / Float64(N * N)) end
function tmp = code(N) tmp = (N + -0.5) / (N * N); end
code[N_] := N[(N[(N + -0.5), $MachinePrecision] / N[(N * N), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{N + -0.5}{N \cdot N}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
Simplified96.2%
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-+.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6496.3
lift-+.f64N/A
+-commutativeN/A
lower-+.f6496.3
Applied egg-rr96.3%
Taylor expanded in N around inf
div-subN/A
*-inversesN/A
associate-/r*N/A
unpow2N/A
associate-*r/N/A
metadata-evalN/A
associate-/r*N/A
unpow2N/A
div-subN/A
lower-/.f64N/A
sub-negN/A
metadata-evalN/A
lower-+.f64N/A
unpow2N/A
lower-*.f6492.1
Simplified92.1%
(FPCore (N) :precision binary64 (/ 1.0 N))
double code(double N) {
    /* Leading-order term only: 1/N. */
    double recip = 1.0 / N;
    return recip;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 1.0d0 / n
end function
public static double code(double N) {
return 1.0 / N;
}
def code(N): return 1.0 / N
function code(N) return Float64(1.0 / N) end
function tmp = code(N) tmp = 1.0 / N; end
code[N_] := N[(1.0 / N), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{N}
\end{array}
Initial program 22.6%
Taylor expanded in N around inf
lower-/.f6485.3
Simplified85.3%
(FPCore (N) :precision binary64 0.0)
double code(double N) {
    /* Constant alternative: always returns 0 regardless of N. */
    (void)N;
    return 0.0;
}
real(8) function code(n)
real(8), intent (in) :: n
code = 0.0d0
end function
public static double code(double N) {
return 0.0;
}
def code(N): return 0.0
function code(N) return 0.0 end
function tmp = code(N) tmp = 0.0; end
code[N_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 22.6%
lift-+.f64N/A
lift-log.f64N/A
lift-log.f64N/A
lift--.f6422.6
lift-log.f64N/A
lift-+.f64N/A
+-commutativeN/A
lower-log1p.f6422.6
Applied egg-rr22.6%
lift-log1p.f64N/A
lift-log.f64N/A
flip--N/A
unpow2N/A
lift-pow.f64N/A
unpow2N/A
lift-pow.f64N/A
lift-log1p.f64N/A
lift-log.f64N/A
sum-logN/A
+-commutativeN/A
distribute-lft1-inN/A
lift-fma.f64N/A
lift-log.f64N/A
sub-divN/A
lift-pow.f64N/A
unpow2N/A
associate-*r/N/A
lift-/.f64N/A
lift-/.f64N/A
Applied egg-rr22.6%
Taylor expanded in N around inf
distribute-rgt-outN/A
metadata-evalN/A
mul0-rgt3.3
Simplified3.3%
(FPCore (N) :precision binary64 (log1p (/ 1.0 N)))
double code(double N) {
return log1p((1.0 / N));
}
public static double code(double N) {
return Math.log1p((1.0 / N));
}
def code(N): return math.log1p((1.0 / N))
function code(N) return log1p(Float64(1.0 / N)) end
code[N_] := N[Log[1 + N[(1.0 / N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\mathsf{log1p}\left(\frac{1}{N}\right)
\end{array}
(FPCore (N) :precision binary64 (log (+ 1.0 (/ 1.0 N))))
double code(double N) {
return log((1.0 + (1.0 / N)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = log((1.0d0 + (1.0d0 / n)))
end function
public static double code(double N) {
return Math.log((1.0 + (1.0 / N)));
}
def code(N): return math.log((1.0 + (1.0 / N)))
function code(N) return log(Float64(1.0 + Float64(1.0 / N))) end
function tmp = code(N) tmp = log((1.0 + (1.0 / N))); end
code[N_] := N[Log[N[(1.0 + N[(1.0 / N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(1 + \frac{1}{N}\right)
\end{array}
(FPCore (N) :precision binary64 (+ (+ (+ (/ 1.0 N) (/ -1.0 (* 2.0 (pow N 2.0)))) (/ 1.0 (* 3.0 (pow N 3.0)))) (/ -1.0 (* 4.0 (pow N 4.0)))))
double code(double N) {
return (((1.0 / N) + (-1.0 / (2.0 * pow(N, 2.0)))) + (1.0 / (3.0 * pow(N, 3.0)))) + (-1.0 / (4.0 * pow(N, 4.0)));
}
real(8) function code(n)
real(8), intent (in) :: n
code = (((1.0d0 / n) + ((-1.0d0) / (2.0d0 * (n ** 2.0d0)))) + (1.0d0 / (3.0d0 * (n ** 3.0d0)))) + ((-1.0d0) / (4.0d0 * (n ** 4.0d0)))
end function
public static double code(double N) {
return (((1.0 / N) + (-1.0 / (2.0 * Math.pow(N, 2.0)))) + (1.0 / (3.0 * Math.pow(N, 3.0)))) + (-1.0 / (4.0 * Math.pow(N, 4.0)));
}
def code(N): return (((1.0 / N) + (-1.0 / (2.0 * math.pow(N, 2.0)))) + (1.0 / (3.0 * math.pow(N, 3.0)))) + (-1.0 / (4.0 * math.pow(N, 4.0)))
function code(N) return Float64(Float64(Float64(Float64(1.0 / N) + Float64(-1.0 / Float64(2.0 * (N ^ 2.0)))) + Float64(1.0 / Float64(3.0 * (N ^ 3.0)))) + Float64(-1.0 / Float64(4.0 * (N ^ 4.0)))) end
function tmp = code(N) tmp = (((1.0 / N) + (-1.0 / (2.0 * (N ^ 2.0)))) + (1.0 / (3.0 * (N ^ 3.0)))) + (-1.0 / (4.0 * (N ^ 4.0))); end
code[N_] := N[(N[(N[(N[(1.0 / N), $MachinePrecision] + N[(-1.0 / N[(2.0 * N[Power[N, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(3.0 * N[Power[N, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[(4.0 * N[Power[N, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\frac{1}{N} + \frac{-1}{2 \cdot {N}^{2}}\right) + \frac{1}{3 \cdot {N}^{3}}\right) + \frac{-1}{4 \cdot {N}^{4}}
\end{array}
herbie shell --seed 2024215
(FPCore (N)
:name "2log (problem 3.3.6)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+40))
:alt
(! :herbie-platform default (log1p (/ 1 N)))
:alt
(! :herbie-platform default (log (+ 1 (/ 1 N))))
:alt
(! :herbie-platform default (+ (/ 1 N) (/ -1 (* 2 (pow N 2))) (/ 1 (* 3 (pow N 3))) (/ -1 (* 4 (pow N 4)))))
(- (log (+ N 1.0)) (log N)))