
Initial program:

```fpcore
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
```

```c
double code(double x) {
    return (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
}
```

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = (1.0d0 / 2.0d0) * log(((1.0d0 + x) / (1.0d0 - x)))
end function
```

```java
public static double code(double x) {
    return (1.0 / 2.0) * Math.log(((1.0 + x) / (1.0 - x)));
}
```

```python
def code(x): return (1.0 / 2.0) * math.log(((1.0 + x) / (1.0 - x)))
```

```julia
function code(x)
    return Float64(Float64(1.0 / 2.0) * log(Float64(Float64(1.0 + x) / Float64(1.0 - x))))
end
```

```matlab
function tmp = code(x)
    tmp = (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
end
```

```wolfram
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
```

$$\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)$$
Outcomes were sampled in binary64 precision; on the sampled points the initial program is only 9.0% accurate.
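The expression is the textbook formula for atanh(x) (the report names it "Hyperbolic arc-(co)tangent"), and it loses accuracy when 1 + x and 1 - x round: for tiny x the relative error grows roughly like machine epsilon divided by x. A minimal sketch of that behavior, assuming only the Python standard library and using math.atanh as the reference:

```python
import math

def naive(x):
    # Original program: (1/2) * log((1 + x) / (1 - x))
    return (1.0 / 2.0) * math.log((1.0 + x) / (1.0 - x))

# math.atanh computes the same function accurately and serves as the reference.
for x in [1e-3, 1e-6, 1e-9, 1e-12]:
    exact = math.atanh(x)
    rel_err = abs(naive(x) - exact) / abs(exact)
    print(f"x = {x:.0e}   relative error = {rel_err:.2e}")
```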
Herbie found 5 alternatives:
Alternative 1:

```fpcore
(FPCore (x) :precision binary64 (* (- (* (log1p x) 2.0) (log1p (* (- x) x))) 0.5))
```

```c
double code(double x) {
    return ((log1p(x) * 2.0) - log1p((-x * x))) * 0.5;
}
```

```java
public static double code(double x) {
    return ((Math.log1p(x) * 2.0) - Math.log1p((-x * x))) * 0.5;
}
```

```python
def code(x): return ((math.log1p(x) * 2.0) - math.log1p((-x * x))) * 0.5
```

```julia
function code(x)
    return Float64(Float64(Float64(log1p(x) * 2.0) - log1p(Float64(Float64(-x) * x))) * 0.5)
end
```

```wolfram
code[x_] := N[(N[(N[(N[Log[1 + x], $MachinePrecision] * 2.0), $MachinePrecision] - N[Log[1 + N[((-x) * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
```

$$\left(\mathsf{log1p}\left(x\right) \cdot 2 - \mathsf{log1p}\left(\left(-x\right) \cdot x\right)\right) \cdot 0.5$$
Derivation:

1. Initial program: 9.0%
2. Applied rewrites: 100.0%
   - lift-/.f64: N/A
   - metadata-eval: 100.0
3. Applied rewrites: 100.0%
4. Final simplification: 100.0%
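This alternative rests on the identity 2 log1p(x) - log1p(-x^2) = log((1+x)^2 / (1-x^2)) = log((1+x)/(1-x)); log1p avoids rounding 1 ± x, which is where the original formula loses its digits. A quick sketch checking the rewrite against math.atanh (standard library only; function and variable names are illustrative):

```python
import math

def rewritten(x):
    # ((log1p(x) * 2) - log1p(-x * x)) * 0.5
    # = 0.5 * log((1 + x)^2 / (1 - x^2)) = 0.5 * log((1 + x) / (1 - x))
    return (math.log1p(x) * 2.0 - math.log1p(-x * x)) * 0.5

for x in [1e-12, 1e-6, 0.5, 0.999]:
    exact = math.atanh(x)
    rel_err = abs(rewritten(x) - exact) / abs(exact)
    print(f"x = {x}   relative error = {rel_err:.2e}")
```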
Alternative 2:

```fpcore
(FPCore (x) :precision binary64 (fma (* (fma (fma 0.14285714285714285 (* x x) 0.2) (* x x) 0.3333333333333333) (* x x)) x x))
```

```c
double code(double x) {
    return fma((fma(fma(0.14285714285714285, (x * x), 0.2), (x * x), 0.3333333333333333) * (x * x)), x, x);
}
```

```julia
function code(x)
    return fma(Float64(fma(fma(0.14285714285714285, Float64(x * x), 0.2), Float64(x * x), 0.3333333333333333) * Float64(x * x)), x, x)
end
```

```wolfram
code[x_] := N[(N[(N[(N[(0.14285714285714285 * N[(x * x), $MachinePrecision] + 0.2), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]
```

$$\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.14285714285714285, x \cdot x, 0.2\right), x \cdot x, 0.3333333333333333\right) \cdot \left(x \cdot x\right), x, x\right)$$
Derivation:

1. Initial program: 9.0%
2. Applied rewrites: 100.0%
3. Taylor expanded in x around 0
   - +-commutative: N/A
   - distribute-lft-in: N/A
   - associate-*r*: N/A
   - unpow2: N/A
   - cube-mult: N/A
   - *-rgt-identity: N/A
   - lower-fma.f64: N/A
   - lower-pow.f64: N/A
   - +-commutative: N/A
   - *-commutative: N/A
   - lower-fma.f64: N/A
   - +-commutative: N/A
   - lower-fma.f64: N/A
   - unpow2: N/A
   - lower-*.f64: N/A
   - unpow2: N/A
   - lower-*.f64: 99.8
4. Applied rewrites: 99.8%
5. Applied rewrites: 99.8%
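The fma chain is a Horner evaluation of the odd Taylor series atanh(x) = x + x^3/3 + x^5/5 + x^7/7 + ..., truncated after the x^7 term; the constants 0.14285714285714285, 0.2, and 0.3333333333333333 are 1/7, 1/5, and 1/3. A sketch of the same evaluation (math.fma requires Python 3.13+; on older versions, replacing each fma(a, b, c) with a * b + c shows the same polynomial, just without the fused rounding):

```python
import math  # math.fma is available from Python 3.13 onward

def poly7(x):
    # Horner form of x + x^3/3 + x^5/5 + x^7/7, mirroring the fma chain above.
    xx = x * x
    t = math.fma(0.14285714285714285, xx, 0.2)  # x^2/7 + 1/5
    t = math.fma(t, xx, 0.3333333333333333)     # (...) * x^2 + 1/3
    return math.fma(t * xx, x, x)               # (...) * x^2 * x + x

for x in [1e-3, 0.01, 0.1]:
    exact = math.atanh(x)
    print(f"x = {x}   relative error = {abs(poly7(x) - exact) / exact:.2e}")
```

The first dropped series term is x^9/9, so the polynomial is excellent near 0 and degrades as |x| grows, consistent with the 99.8% figure in the derivation.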
Alternative 3:

```fpcore
(FPCore (x) :precision binary64 (fma (* (fma 0.2 (* x x) 0.3333333333333333) (* x x)) x x))
```

```c
double code(double x) {
    return fma((fma(0.2, (x * x), 0.3333333333333333) * (x * x)), x, x);
}
```

```julia
function code(x)
    return fma(Float64(fma(0.2, Float64(x * x), 0.3333333333333333) * Float64(x * x)), x, x)
end
```

```wolfram
code[x_] := N[(N[(N[(0.2 * N[(x * x), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]
```

$$\mathsf{fma}\left(\mathsf{fma}\left(0.2, x \cdot x, 0.3333333333333333\right) \cdot \left(x \cdot x\right), x, x\right)$$
Derivation:

1. Initial program: 9.0%
2. Applied rewrites: 100.0%
3. Taylor expanded in x around 0
   - +-commutative: N/A
   - distribute-lft-in: N/A
   - associate-*r*: N/A
   - unpow2: N/A
   - cube-mult: N/A
   - *-rgt-identity: N/A
   - lower-fma.f64: N/A
   - lower-pow.f64: N/A
   - +-commutative: N/A
   - *-commutative: N/A
   - lower-fma.f64: N/A
   - +-commutative: N/A
   - lower-fma.f64: N/A
   - unpow2: N/A
   - lower-*.f64: N/A
   - unpow2: N/A
   - lower-*.f64: 99.8
4. Applied rewrites: 99.8%
5. Applied rewrites: 99.8%
6. Taylor expanded in x around 0
7. Applied rewrites: 99.7%
Alternative 4:

```fpcore
(FPCore (x) :precision binary64 (fma (* (* x x) x) 0.3333333333333333 x))
```

```c
double code(double x) {
    return fma(((x * x) * x), 0.3333333333333333, x);
}
```

```julia
function code(x)
    return fma(Float64(Float64(x * x) * x), 0.3333333333333333, x)
end
```

```wolfram
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * 0.3333333333333333 + x), $MachinePrecision]
```

$$\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, 0.3333333333333333, x\right)$$
Derivation:

1. Initial program: 9.0%
2. Taylor expanded in x around 0
   - +-commutative: N/A
   - distribute-lft-in: N/A
   - *-commutative: N/A
   - associate-*r*: N/A
   - unpow2: N/A
   - cube-mult: N/A
   - *-rgt-identity: N/A
   - lower-fma.f64: N/A
   - lower-pow.f64: 99.4
3. Applied rewrites: 99.4%
4. Applied rewrites: 99.4%
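Alternatives 3 and 4 truncate the same series one and two terms earlier (after x^5/5 and after x^3/3), trading accuracy away from 0 for less work. A sketch of how the truncation error grows with |x|, written with plain multiplies and adds for portability (the report's versions use fma):

```python
import math

def poly5(x):  # x + x^3/3 + x^5/5
    xx = x * x
    return ((0.2 * xx + 0.3333333333333333) * xx) * x + x

def poly3(x):  # x + x^3/3
    return ((x * x) * x) * 0.3333333333333333 + x

for x in [1e-4, 1e-2, 0.1, 0.3]:
    exact = math.atanh(x)
    e5 = abs(poly5(x) - exact) / exact
    e3 = abs(poly3(x) - exact) / exact
    print(f"x = {x}   degree-5 error = {e5:.2e}   degree-3 error = {e3:.2e}")
```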
Alternative 5:

```fpcore
(FPCore (x) :precision binary64 (* (* x 2.0) 0.5))
```

```c
double code(double x) {
    return (x * 2.0) * 0.5;
}
```

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x * 2.0d0) * 0.5d0
end function
```

```java
public static double code(double x) {
    return (x * 2.0) * 0.5;
}
```

```python
def code(x): return (x * 2.0) * 0.5
```

```julia
function code(x)
    return Float64(Float64(x * 2.0) * 0.5)
end
```

```matlab
function tmp = code(x)
    tmp = (x * 2.0) * 0.5;
end
```

```wolfram
code[x_] := N[(N[(x * 2.0), $MachinePrecision] * 0.5), $MachinePrecision]
```

$$\left(x \cdot 2\right) \cdot 0.5$$
Derivation:

1. Initial program: 9.0%
2. Taylor expanded in x around 0
   - lower-*.f64: 99.0
3. Applied rewrites: 99.0%
   - lift-*.f64: N/A
   - *-commutative: N/A
   - lower-*.f64: 99.0
   - lift-/.f64: N/A
   - metadata-eval: N/A
4. Applied rewrites: 99.0%
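This final alternative is the degree-1 truncation atanh(x) ≈ x, kept in the shape (x · 2) · 0.5. In binary64 both multiplications only shift the exponent, so the round trip returns x bit-for-bit unless x · 2 overflows; and since atanh is only defined for |x| < 1, overflow cannot occur on valid inputs. A quick sketch:

```python
# Multiplying by 2.0 and then by 0.5 only adjusts the exponent, so the
# round trip is exact for every finite double whose doubling fits.
for x in [1e-300, -0.75, 1e-3, 0.999]:
    assert (x * 2.0) * 0.5 == x

# Outside atanh's domain, near the top of the exponent range, the
# intermediate x * 2.0 overflows to infinity:
big = 1.5e308
print((big * 2.0) * 0.5)  # prints inf, not 1.5e308
```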
Reproduce:

```
herbie shell --seed 2024304
(FPCore (x)
  :name "Hyperbolic arc-(co)tangent"
  :precision binary64
  (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
```